2024-11-19 12:17:36,975 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-19 12:17:36,993 main DEBUG Took 0.016343 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-19 12:17:36,994 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-19 12:17:36,994 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-19 12:17:36,995 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-19 12:17:36,996 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,002 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-19 12:17:37,014 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,015 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,016 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,016 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,017 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,017 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,017 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,018 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,018 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,018 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,019 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,019 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,020 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,020 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-19 12:17:37,021 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,021 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,021 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,021 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,022 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,022 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,022 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,023 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,023 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,023 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:17:37,024 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,024 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-19 12:17:37,025 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:17:37,026 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-19 12:17:37,028 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-19 12:17:37,028 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
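The `LoggerConfig$Builder` entries above show Log4j Core materializing per-package log levels from the test's properties-based configuration (for example `org.apache.hadoop` at WARN, `org.apache.hadoop.hbase` at DEBUG, `org.apache.zookeeper` at ERROR, and a root logger of `INFO,Console`). Purely as an illustrative sketch, not how this run actually configures logging (it loads `log4j2.properties` from the tests jar, as the later "Reconfiguration complete" entry shows), the same levels could be applied programmatically through Log4j 2's `Configurator`:

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public final class ApplyTestLogLevels {
    public static void main(String[] args) {
        // Mirrors a few of the LoggerConfig levels built in the log above.
        Configurator.setRootLevel(Level.INFO);                                  // root = INFO,Console
        Configurator.setLevel("org.apache.hadoop", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
        Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
        Configurator.setLevel("org.apache.hadoop.metrics2.util.MBeans", Level.ERROR);
    }
}
```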
2024-11-19 12:17:37,029 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-19 12:17:37,030 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-19 12:17:37,038 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-19 12:17:37,040 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-19 12:17:37,042 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-19 12:17:37,042 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-19 12:17:37,043 main DEBUG createAppenders(={Console}) 2024-11-19 12:17:37,043 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-19 12:17:37,044 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-19 12:17:37,044 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-19 12:17:37,044 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-19 12:17:37,045 main DEBUG OutputStream closed 2024-11-19 12:17:37,045 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-19 12:17:37,045 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-19 12:17:37,045 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-19 12:17:37,111 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-19 12:17:37,113 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-19 12:17:37,114 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-19 12:17:37,115 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-19 12:17:37,115 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-19 12:17:37,115 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-19 12:17:37,116 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-19 12:17:37,116 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-19 12:17:37,116 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-19 12:17:37,116 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-19 12:17:37,117 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-19 12:17:37,117 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-19 12:17:37,117 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-19 12:17:37,117 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-19 12:17:37,118 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-19 12:17:37,118 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-19 12:17:37,118 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-19 12:17:37,119 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-19 12:17:37,121 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-19 12:17:37,121 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-19 12:17:37,122 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-19 12:17:37,122 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-19T12:17:37,343 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255 2024-11-19 12:17:37,346 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-19 12:17:37,346 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
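The appender assembled above is HBase's test-only `HBaseTestAppender`, writing to `SYSTEM_ERR` with the pattern `%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n`, after which the default console configuration is shut down and the new context is registered via JMX. As a rough sketch only, substituting a stock Log4j 2 `ConsoleAppender` for the HBase-specific appender, an equivalent layout and target can be built programmatically like this:

```java
import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.layout.PatternLayout;

public final class ConsoleAppenderSketch {
    public static void main(String[] args) {
        // Same pattern string as the PatternLayout$Builder entry in the log.
        PatternLayout layout = PatternLayout.newBuilder()
            .withPattern("%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")
            .build();

        // Stand-in for HBaseTestAppender: a plain console appender on stderr.
        ConsoleAppender console = ConsoleAppender.newBuilder()
            .setName("Console")
            .setTarget(ConsoleAppender.Target.SYSTEM_ERR)
            .setLayout(layout)
            .build();
        console.start();
    }
}
```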
2024-11-19T12:17:37,355 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-19T12:17:37,373 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T12:17:37,376 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/cluster_516133e5-12a5-9390-7554-f37f555213db, deleteOnExit=true 2024-11-19T12:17:37,376 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-19T12:17:37,377 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/test.cache.data in system properties and HBase conf 2024-11-19T12:17:37,377 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T12:17:37,378 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/hadoop.log.dir in system properties and HBase conf 2024-11-19T12:17:37,378 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T12:17:37,379 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T12:17:37,379 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-19T12:17:37,466 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-19T12:17:37,550 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T12:17:37,554 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:17:37,554 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:17:37,555 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T12:17:37,555 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:17:37,556 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T12:17:37,556 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T12:17:37,556 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:17:37,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:17:37,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T12:17:37,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/nfs.dump.dir in system properties and HBase conf 2024-11-19T12:17:37,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/java.io.tmpdir in system properties and HBase conf 2024-11-19T12:17:37,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:17:37,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T12:17:37,559 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T12:17:38,339 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-19T12:17:38,411 INFO [Time-limited test {}] log.Log(170): Logging initialized @2106ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-19T12:17:38,483 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:17:38,544 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:17:38,564 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:17:38,564 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:17:38,566 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:17:38,579 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:17:38,582 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@106ffc0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:17:38,583 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@704acb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:17:38,775 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6904431c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/java.io.tmpdir/jetty-localhost-45321-hadoop-hdfs-3_4_1-tests_jar-_-any-5194755582128488946/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:17:38,782 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20178447{HTTP/1.1, (http/1.1)}{localhost:45321} 2024-11-19T12:17:38,782 INFO [Time-limited test {}] server.Server(415): Started @2478ms 2024-11-19T12:17:39,164 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:17:39,171 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:17:39,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:17:39,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:17:39,172 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:17:39,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ac85cee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:17:39,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74536f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:17:39,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29607158{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/java.io.tmpdir/jetty-localhost-37323-hadoop-hdfs-3_4_1-tests_jar-_-any-17789821697506711490/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:17:39,296 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76b7aca8{HTTP/1.1, (http/1.1)}{localhost:37323} 2024-11-19T12:17:39,296 INFO [Time-limited test {}] server.Server(415): Started @2992ms 2024-11-19T12:17:39,352 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:17:39,830 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/cluster_516133e5-12a5-9390-7554-f37f555213db/dfs/data/data1/current/BP-1970135351-172.17.0.2-1732018658116/current, will proceed with Du for space computation calculation, 2024-11-19T12:17:39,830 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/cluster_516133e5-12a5-9390-7554-f37f555213db/dfs/data/data2/current/BP-1970135351-172.17.0.2-1732018658116/current, will proceed with Du for space computation calculation, 2024-11-19T12:17:39,870 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:17:39,930 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255 2024-11-19T12:17:39,931 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2dcb725635915abf with lease ID 0x79243052e715a72a: Processing first storage report for DS-ca201142-680b-4e29-933e-1da7184929c1 from datanode DatanodeRegistration(127.0.0.1:46369, datanodeUuid=204d5fbe-fbbf-4948-802d-73a762d9c991, infoPort=45339, infoSecurePort=0, ipcPort=40867, storageInfo=lv=-57;cid=testClusterID;nsid=1768527450;c=1732018658116) 2024-11-19T12:17:39,932 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2dcb725635915abf with lease ID 0x79243052e715a72a: from storage DS-ca201142-680b-4e29-933e-1da7184929c1 node DatanodeRegistration(127.0.0.1:46369, datanodeUuid=204d5fbe-fbbf-4948-802d-73a762d9c991, infoPort=45339, infoSecurePort=0, ipcPort=40867, storageInfo=lv=-57;cid=testClusterID;nsid=1768527450;c=1732018658116), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T12:17:39,933 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2dcb725635915abf with lease ID 0x79243052e715a72a: Processing first storage report for DS-5337ad03-5fef-4ac5-86b9-2105b7c9ee71 from datanode DatanodeRegistration(127.0.0.1:46369, datanodeUuid=204d5fbe-fbbf-4948-802d-73a762d9c991, infoPort=45339, infoSecurePort=0, ipcPort=40867, storageInfo=lv=-57;cid=testClusterID;nsid=1768527450;c=1732018658116) 2024-11-19T12:17:39,933 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2dcb725635915abf with lease ID 0x79243052e715a72a: from storage DS-5337ad03-5fef-4ac5-86b9-2105b7c9ee71 node DatanodeRegistration(127.0.0.1:46369, datanodeUuid=204d5fbe-fbbf-4948-802d-73a762d9c991, infoPort=45339, infoSecurePort=0, ipcPort=40867, storageInfo=lv=-57;cid=testClusterID;nsid=1768527450;c=1732018658116), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, 
invalidatedBlocks: 0 2024-11-19T12:17:40,006 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/cluster_516133e5-12a5-9390-7554-f37f555213db/zookeeper_0, clientPort=64186, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/cluster_516133e5-12a5-9390-7554-f37f555213db/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/cluster_516133e5-12a5-9390-7554-f37f555213db/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T12:17:40,017 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=64186 2024-11-19T12:17:40,031 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:17:40,035 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:17:40,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:17:40,668 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22 with version=8 2024-11-19T12:17:40,669 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/hbase-staging 2024-11-19T12:17:40,788 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-19T12:17:41,037 INFO [Time-limited test {}] client.ConnectionUtils(129): master/af314c41f984:0 server-side Connection retries=45 2024-11-19T12:17:41,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:17:41,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:17:41,056 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:17:41,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:17:41,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:17:41,181 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:17:41,238 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-19T12:17:41,247 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-19T12:17:41,251 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:17:41,277 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 15696 (auto-detected) 2024-11-19T12:17:41,278 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-19T12:17:41,296 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:37977 2024-11-19T12:17:41,304 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:17:41,306 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:17:41,318 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:37977 connecting to ZooKeeper ensemble=127.0.0.1:64186 2024-11-19T12:17:41,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379770x0, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:17:41,351 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37977-0x1000fb71b770000 connected 2024-11-19T12:17:41,379 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:17:41,382 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:17:41,384 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:17:41,388 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37977 2024-11-19T12:17:41,389 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37977 2024-11-19T12:17:41,389 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37977 2024-11-19T12:17:41,390 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37977 2024-11-19T12:17:41,390 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37977 
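From the `StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1, ...}` entry onward, the log is the standard HBase mini-cluster bring-up: an in-process HDFS DataNode, a `MiniZooKeeperCluster` on the logged client port, and then the master's Netty RPC endpoint and ZooKeeper session. A minimal sketch of the JUnit scaffolding that produces startup output of this shape, assuming the HBase 2.x `HBaseTestingUtility` (the class name, test category, and assertion here are illustrative, not taken from this log):

```java
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class MiniClusterSketchTest {

  // HBaseClassTestRule derives the per-class timeout, as in the
  // "Test class ... timeout: 13 mins" entry above.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(MiniClusterSketchTest.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    // Same shape as the logged option: one master, one region server,
    // one DataNode, one ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(1)
        .numZkServers(1)
        .build();
    TEST_UTIL.startMiniCluster(option);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testClusterIsRunning() throws Exception {
    // startMiniCluster blocks until the active master finishes initialization.
    assertTrue(TEST_UTIL.getHBaseCluster().getMaster().isInitialized());
  }
}
```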
2024-11-19T12:17:41,397 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22, hbase.cluster.distributed=false 2024-11-19T12:17:41,456 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/af314c41f984:0 server-side Connection retries=45 2024-11-19T12:17:41,457 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:17:41,457 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:17:41,457 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:17:41,457 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:17:41,457 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:17:41,459 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:17:41,461 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:17:41,462 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36047 2024-11-19T12:17:41,464 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:17:41,469 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:17:41,471 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:17:41,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:17:41,480 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36047 connecting to ZooKeeper ensemble=127.0.0.1:64186 2024-11-19T12:17:41,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360470x0, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:17:41,485 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36047-0x1000fb71b770001 connected 2024-11-19T12:17:41,485 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:17:41,487 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36047-0x1000fb71b770001, 
quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:17:41,488 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:17:41,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36047 2024-11-19T12:17:41,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36047 2024-11-19T12:17:41,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36047 2024-11-19T12:17:41,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36047 2024-11-19T12:17:41,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36047 2024-11-19T12:17:41,496 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/af314c41f984,37977,1732018660782 2024-11-19T12:17:41,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:17:41,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:17:41,504 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/af314c41f984,37977,1732018660782 2024-11-19T12:17:41,513 DEBUG [M:0;af314c41f984:37977 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;af314c41f984:37977 2024-11-19T12:17:41,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:17:41,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:17:41,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:41,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:41,525 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:17:41,526 DEBUG [zk-event-processor-pool-0 
{}] zookeeper.ZKUtil(111): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:17:41,526 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/af314c41f984,37977,1732018660782 from backup master directory 2024-11-19T12:17:41,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/af314c41f984,37977,1732018660782 2024-11-19T12:17:41,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:17:41,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:17:41,530 WARN [master/af314c41f984:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:17:41,530 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=af314c41f984,37977,1732018660782 2024-11-19T12:17:41,532 INFO [master/af314c41f984:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-19T12:17:41,533 INFO [master/af314c41f984:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-19T12:17:41,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:17:41,999 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/hbase.id with ID: ee58a9bb-160d-4d98-a891-dbd9db786fa3 2024-11-19T12:17:42,039 INFO [master/af314c41f984:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:17:42,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:42,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:42,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:17:42,496 INFO [master/af314c41f984:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:17:42,499 INFO [master/af314c41f984:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T12:17:42,516 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:42,521 INFO [master/af314c41f984:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-19T12:17:42,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:17:42,967 INFO [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store 2024-11-19T12:17:42,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:17:43,388 INFO [master/af314c41f984:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
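The DEBUG-level `NoSuchMethodException` above is expected rather than a failure: `FanOutOneBlockAsyncDFSOutputSaslHelper` probes `DFSClient` by reflection for the HDFS-12396 method (`decryptEncryptedDataEncryptionKey`) and falls back to the pre-HDFS-12396 code path when it is absent. A generic sketch of that probe-and-fall-back pattern, using a JDK method as a stand-in target instead of the real `DFSClient` internals:

```java
import java.lang.reflect.Method;

public final class ReflectionProbeSketch {

    /** Returns the named method if the running dependency provides it, otherwise null. */
    static Method probe(Class<?> owner, String name, Class<?>... parameterTypes) {
        try {
            Method m = owner.getDeclaredMethod(name, parameterTypes);
            m.setAccessible(true);
            return m;                 // newer dependency: take the richer code path
        } catch (NoSuchMethodException e) {
            // Same outcome as the DEBUG trace above: note it and fall back.
            return null;
        }
    }

    public static void main(String[] args) {
        // Stand-in probe: String#isBlank exists on Java 11+ but not on Java 8.
        Method isBlank = probe(String.class, "isBlank");
        System.out.println(isBlank != null
            ? "isBlank available - using new path"
            : "isBlank missing - falling back");
    }
}
```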
2024-11-19T12:17:43,388 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:17:43,390 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:17:43,390 INFO [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:17:43,390 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:17:43,391 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-11-19T12:17:43,391 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:17:43,391 INFO [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:17:43,391 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-19T12:17:43,394 WARN [master/af314c41f984:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/.initializing 2024-11-19T12:17:43,394 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/WALs/af314c41f984,37977,1732018660782 2024-11-19T12:17:43,400 INFO [master/af314c41f984:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-19T12:17:43,414 INFO [master/af314c41f984:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=af314c41f984%2C37977%2C1732018660782, suffix=, logDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/WALs/af314c41f984,37977,1732018660782, archiveDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/oldWALs, maxLogs=10 2024-11-19T12:17:43,447 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/WALs/af314c41f984,37977,1732018660782/af314c41f984%2C37977%2C1732018660782.1732018663421, exclude list is [], retry=0 2024-11-19T12:17:43,473 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46369,DS-ca201142-680b-4e29-933e-1da7184929c1,DISK] 2024-11-19T12:17:43,478 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
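The `master:store` descriptor logged above defines four column families: `info` (3 versions, in-memory, ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks) plus `proc`, `rs`, and `state` with plain defaults (1 version, ROW bloom filter, no encoding, 64 KB blocks). A hedged reconstruction of an equivalent descriptor with the HBase 2.x builder API, based only on the attributes printed in the log rather than the actual `MasterRegionFactory` source:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class MasterStoreDescriptorSketch {

    public static TableDescriptor build() {
        // 'info' family as logged: 3 versions, in-memory, ROWCOL bloom,
        // ROW_INDEX_V1 encoding, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
                .setColumnFamily(info);

        // 'proc', 'rs' and 'state' share the plain defaults shown in the log:
        // 1 version, ROW bloom, no encoding, 64 KB blocks.
        for (String name : new String[] { "proc", "rs", "state" }) {
            builder.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(name))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build());
        }
        return builder.build();
    }
}
```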
2024-11-19T12:17:43,530 INFO [master/af314c41f984:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/WALs/af314c41f984,37977,1732018660782/af314c41f984%2C37977%2C1732018660782.1732018663421 2024-11-19T12:17:43,531 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45339:45339)] 2024-11-19T12:17:43,532 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:17:43,532 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:17:43,536 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:17:43,537 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:17:43,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:17:43,622 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T12:17:43,627 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:43,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:17:43,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:17:43,641 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T12:17:43,641 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:43,643 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:17:43,643 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:17:43,647 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T12:17:43,647 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:43,648 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:17:43,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:17:43,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T12:17:43,652 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:43,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:17:43,659 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:17:43,661 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:17:43,673 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:17:43,678 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:17:43,686 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:17:43,687 INFO [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61840940, jitterRate=-0.07849818468093872}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:17:43,693 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-19T12:17:43,695 INFO [master/af314c41f984:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T12:17:43,736 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d3bf9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:43,782 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
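[editorial sketch] The ConstantSizeRegionSplitPolicy entry above reports desiredMaxFileSize=61840940 with jitterRate=-0.07849818468093872; that pair (and the analogous pairs logged later for hbase:meta) is consistent with a ~64 MiB (67108864-byte) base file size scaled by (1 + jitterRate). A short worked sketch, assuming that 64 MiB base, which is inferred from the arithmetic rather than stated in the log:

    // Illustrative only: 67108864 * (1 - 0.07849818468093872) reproduces the logged
    // desiredMaxFileSize to within rounding.
    public class SplitSizeJitterSketch {
        public static void main(String[] args) {
            long assumedMaxFileSize = 67_108_864L;         // assumed 64 MiB base for this test
            double jitterRate = -0.07849818468093872;      // as logged
            long desired = Math.round(assumedMaxFileSize * (1 + jitterRate));
            System.out.println(desired);                   // ~= 61840940
        }
    }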
2024-11-19T12:17:43,799 INFO [master/af314c41f984:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T12:17:43,799 INFO [master/af314c41f984:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T12:17:43,802 INFO [master/af314c41f984:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T12:17:43,804 INFO [master/af314c41f984:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-19T12:17:43,811 INFO [master/af314c41f984:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 6 msec 2024-11-19T12:17:43,812 INFO [master/af314c41f984:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T12:17:43,847 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T12:17:43,862 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T12:17:43,866 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-19T12:17:43,869 INFO [master/af314c41f984:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T12:17:43,870 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T12:17:43,874 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-19T12:17:43,877 INFO [master/af314c41f984:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T12:17:43,881 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T12:17:43,884 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-19T12:17:43,885 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T12:17:43,887 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T12:17:43,899 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T12:17:43,901 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T12:17:43,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:17:43,910 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=af314c41f984,37977,1732018660782, sessionid=0x1000fb71b770000, setting cluster-up flag (Was=false) 2024-11-19T12:17:43,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:43,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:17:43,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:43,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:43,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:43,936 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T12:17:43,938 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=af314c41f984,37977,1732018660782 2024-11-19T12:17:43,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:43,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:43,949 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T12:17:43,954 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=af314c41f984,37977,1732018660782 2024-11-19T12:17:44,019 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;af314c41f984:36047 2024-11-19T12:17:44,021 INFO 
[RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1008): ClusterId : ee58a9bb-160d-4d98-a891-dbd9db786fa3 2024-11-19T12:17:44,026 DEBUG [RS:0;af314c41f984:36047 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:17:44,034 DEBUG [RS:0;af314c41f984:36047 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:17:44,034 DEBUG [RS:0;af314c41f984:36047 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:17:44,046 DEBUG [RS:0;af314c41f984:36047 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:17:44,048 DEBUG [RS:0;af314c41f984:36047 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f70d3fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:44,064 DEBUG [RS:0;af314c41f984:36047 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1caa45bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=af314c41f984/172.17.0.2:0 2024-11-19T12:17:44,069 INFO [RS:0;af314c41f984:36047 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-19T12:17:44,069 INFO [RS:0;af314c41f984:36047 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-19T12:17:44,069 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-19T12:17:44,078 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-19T12:17:44,081 INFO [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(3073): reportForDuty to master=af314c41f984,37977,1732018660782 with isa=af314c41f984/172.17.0.2:36047, startcode=1732018661455 2024-11-19T12:17:44,086 INFO [master/af314c41f984:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-19T12:17:44,090 INFO [master/af314c41f984:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-19T12:17:44,098 DEBUG [RS:0;af314c41f984:36047 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:17:44,098 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: af314c41f984,37977,1732018660782 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T12:17:44,102 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/af314c41f984:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:17:44,103 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/af314c41f984:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:17:44,103 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/af314c41f984:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:17:44,103 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/af314c41f984:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:17:44,104 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/af314c41f984:0, corePoolSize=10, maxPoolSize=10 2024-11-19T12:17:44,104 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,104 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/af314c41f984:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:17:44,105 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,112 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:17:44,113 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-19T12:17:44,120 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:44,120 INFO [master/af314c41f984:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732018694120 2024-11-19T12:17:44,122 INFO [master/af314c41f984:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T12:17:44,123 INFO [master/af314c41f984:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T12:17:44,120 INFO [PEWorker-2 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 
'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T12:17:44,128 INFO [master/af314c41f984:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T12:17:44,128 INFO [master/af314c41f984:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T12:17:44,129 INFO [master/af314c41f984:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T12:17:44,129 INFO [master/af314c41f984:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T12:17:44,140 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
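[editorial sketch] The hbase:meta descriptor printed above spells out the 'info' family attributes (VERSIONS 3, IN_MEMORY, ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB block size). As a rough illustration only, not the code the master actually runs, an equivalent family could be expressed with the public HBase 2.x builder API:

    // Illustrative only: approximates the 'info' family attributes shown in the
    // logged hbase:meta table descriptor.
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
        public static void main(String[] args) {
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)                                   // VERSIONS => '3'
                    .setInMemory(true)                                   // IN_MEMORY => 'true'
                    .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8 KB
                    .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .build();
            System.out.println(info);
        }
    }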
2024-11-19T12:17:44,142 INFO [master/af314c41f984:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T12:17:44,144 INFO [master/af314c41f984:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T12:17:44,145 INFO [master/af314c41f984:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T12:17:44,151 INFO [master/af314c41f984:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T12:17:44,152 INFO [master/af314c41f984:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T12:17:44,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741831_1007 (size=1039) 2024-11-19T12:17:44,163 INFO [PEWorker-2 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-19T12:17:44,163 INFO [PEWorker-2 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22 2024-11-19T12:17:44,164 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/af314c41f984:0:becomeActiveMaster-HFileCleaner.large.0-1732018664154,5,FailOnTimeoutGroup] 2024-11-19T12:17:44,168 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/af314c41f984:0:becomeActiveMaster-HFileCleaner.small.0-1732018664164,5,FailOnTimeoutGroup] 2024-11-19T12:17:44,168 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-19T12:17:44,168 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T12:17:44,171 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:44,173 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60263, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:17:44,175 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:44,181 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37977 {}] master.ServerManager(332): Checking decommissioned status of RegionServer af314c41f984,36047,1732018661455 2024-11-19T12:17:44,184 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37977 {}] master.ServerManager(486): Registering regionserver=af314c41f984,36047,1732018661455 2024-11-19T12:17:44,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:17:44,203 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22 2024-11-19T12:17:44,203 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:46379 2024-11-19T12:17:44,203 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-19T12:17:44,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:17:44,211 DEBUG [RS:0;af314c41f984:36047 {}] zookeeper.ZKUtil(111): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/af314c41f984,36047,1732018661455 2024-11-19T12:17:44,211 WARN [RS:0;af314c41f984:36047 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T12:17:44,211 INFO [RS:0;af314c41f984:36047 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-19T12:17:44,211 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/WALs/af314c41f984,36047,1732018661455 2024-11-19T12:17:44,214 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [af314c41f984,36047,1732018661455] 2024-11-19T12:17:44,232 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-19T12:17:44,247 INFO [RS:0;af314c41f984:36047 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:17:44,264 INFO [RS:0;af314c41f984:36047 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:17:44,268 INFO [RS:0;af314c41f984:36047 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:17:44,268 INFO [RS:0;af314c41f984:36047 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:44,272 INFO [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-19T12:17:44,284 INFO [RS:0;af314c41f984:36047 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-19T12:17:44,284 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,284 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,285 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,285 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,285 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,285 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/af314c41f984:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:17:44,286 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,286 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,287 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,287 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,287 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/af314c41f984:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:17:44,287 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/af314c41f984:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:17:44,288 DEBUG [RS:0;af314c41f984:36047 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:17:44,290 INFO [RS:0;af314c41f984:36047 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:44,291 INFO [RS:0;af314c41f984:36047 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:44,291 INFO [RS:0;af314c41f984:36047 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:44,291 INFO [RS:0;af314c41f984:36047 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:44,291 INFO [RS:0;af314c41f984:36047 {}] hbase.ChoreService(168): Chore ScheduledChore name=af314c41f984,36047,1732018661455-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-19T12:17:44,319 INFO [RS:0;af314c41f984:36047 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:17:44,323 INFO [RS:0;af314c41f984:36047 {}] hbase.ChoreService(168): Chore ScheduledChore name=af314c41f984,36047,1732018661455-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:44,349 INFO [RS:0;af314c41f984:36047 {}] regionserver.Replication(204): af314c41f984,36047,1732018661455 started 2024-11-19T12:17:44,349 INFO [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1767): Serving as af314c41f984,36047,1732018661455, RpcServer on af314c41f984/172.17.0.2:36047, sessionid=0x1000fb71b770001 2024-11-19T12:17:44,350 DEBUG [RS:0;af314c41f984:36047 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:17:44,350 DEBUG [RS:0;af314c41f984:36047 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager af314c41f984,36047,1732018661455 2024-11-19T12:17:44,350 DEBUG [RS:0;af314c41f984:36047 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'af314c41f984,36047,1732018661455' 2024-11-19T12:17:44,350 DEBUG [RS:0;af314c41f984:36047 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:17:44,351 DEBUG [RS:0;af314c41f984:36047 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:17:44,352 DEBUG [RS:0;af314c41f984:36047 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:17:44,352 DEBUG [RS:0;af314c41f984:36047 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:17:44,352 DEBUG [RS:0;af314c41f984:36047 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager af314c41f984,36047,1732018661455 2024-11-19T12:17:44,352 DEBUG [RS:0;af314c41f984:36047 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'af314c41f984,36047,1732018661455' 2024-11-19T12:17:44,352 DEBUG [RS:0;af314c41f984:36047 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:17:44,353 DEBUG [RS:0;af314c41f984:36047 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:17:44,354 DEBUG [RS:0;af314c41f984:36047 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:17:44,355 INFO [RS:0;af314c41f984:36047 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:17:44,355 INFO [RS:0;af314c41f984:36047 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-19T12:17:44,462 INFO [RS:0;af314c41f984:36047 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-19T12:17:44,467 INFO [RS:0;af314c41f984:36047 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=af314c41f984%2C36047%2C1732018661455, suffix=, logDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/WALs/af314c41f984,36047,1732018661455, archiveDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/oldWALs, maxLogs=32 2024-11-19T12:17:44,488 DEBUG [RS:0;af314c41f984:36047 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/WALs/af314c41f984,36047,1732018661455/af314c41f984%2C36047%2C1732018661455.1732018664470, exclude list is [], retry=0 2024-11-19T12:17:44,495 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46369,DS-ca201142-680b-4e29-933e-1da7184929c1,DISK] 2024-11-19T12:17:44,501 INFO [RS:0;af314c41f984:36047 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/WALs/af314c41f984,36047,1732018661455/af314c41f984%2C36047%2C1732018661455.1732018664470 2024-11-19T12:17:44,502 DEBUG [RS:0;af314c41f984:36047 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45339:45339)] 2024-11-19T12:17:44,601 DEBUG [PEWorker-2 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:17:44,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:17:44,620 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:17:44,621 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:44,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:17:44,623 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:17:44,638 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:17:44,638 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:44,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:17:44,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:17:44,644 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:17:44,644 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:44,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:17:44,647 DEBUG [PEWorker-2 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740 2024-11-19T12:17:44,648 DEBUG [PEWorker-2 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740 2024-11-19T12:17:44,652 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:17:44,658 DEBUG [PEWorker-2 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-19T12:17:44,667 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:17:44,669 INFO [PEWorker-2 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63157211, jitterRate=-0.05888421833515167}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:17:44,673 DEBUG [PEWorker-2 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-19T12:17:44,673 DEBUG [PEWorker-2 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:17:44,673 INFO [PEWorker-2 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-19T12:17:44,673 DEBUG [PEWorker-2 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-19T12:17:44,673 DEBUG [PEWorker-2 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:17:44,673 DEBUG [PEWorker-2 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:17:44,676 INFO [PEWorker-2 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-19T12:17:44,676 DEBUG [PEWorker-2 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-19T12:17:44,680 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:17:44,680 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-19T12:17:44,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T12:17:44,700 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:17:44,704 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T12:17:44,857 DEBUG [af314c41f984:37977 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T12:17:44,863 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:17:44,870 INFO [PEWorker-1 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as af314c41f984,36047,1732018661455, state=OPENING 2024-11-19T12:17:44,877 DEBUG [PEWorker-1 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T12:17:44,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:44,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:44,881 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:17:44,881 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:17:44,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:17:45,066 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:45,069 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T12:17:45,082 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T12:17:45,094 INFO [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-19T12:17:45,095 INFO [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-19T12:17:45,095 INFO [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-19T12:17:45,099 INFO [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=af314c41f984%2C36047%2C1732018661455.meta, suffix=.meta, logDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/WALs/af314c41f984,36047,1732018661455, archiveDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/oldWALs, maxLogs=32 2024-11-19T12:17:45,121 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/WALs/af314c41f984,36047,1732018661455/af314c41f984%2C36047%2C1732018661455.meta.1732018665102.meta, exclude list is [], retry=0 2024-11-19T12:17:45,126 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46369,DS-ca201142-680b-4e29-933e-1da7184929c1,DISK] 2024-11-19T12:17:45,132 INFO [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/WALs/af314c41f984,36047,1732018661455/af314c41f984%2C36047%2C1732018661455.meta.1732018665102.meta 2024-11-19T12:17:45,133 DEBUG 
[RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45339:45339)] 2024-11-19T12:17:45,133 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:17:45,135 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T12:17:45,205 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T12:17:45,212 INFO [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-19T12:17:45,217 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T12:17:45,217 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:17:45,218 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-19T12:17:45,218 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-19T12:17:45,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:17:45,224 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:17:45,224 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:45,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:17:45,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:17:45,227 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:17:45,227 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:45,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:17:45,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:17:45,230 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:17:45,230 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:45,231 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:17:45,233 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740 2024-11-19T12:17:45,238 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740 2024-11-19T12:17:45,241 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:17:45,245 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-19T12:17:45,248 INFO [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61177593, jitterRate=-0.08838282525539398}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:17:45,250 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-19T12:17:45,260 INFO [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732018665059 2024-11-19T12:17:45,274 DEBUG [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T12:17:45,274 INFO [RS_OPEN_META-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-19T12:17:45,276 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:17:45,278 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as af314c41f984,36047,1732018661455, state=OPEN 2024-11-19T12:17:45,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:17:45,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:17:45,287 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:17:45,287 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:17:45,293 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T12:17:45,293 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=af314c41f984,36047,1732018661455 in 404 msec 2024-11-19T12:17:45,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T12:17:45,301 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-19T12:17:45,307 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.2930 sec 2024-11-19T12:17:45,308 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732018665308, completionTime=-1 2024-11-19T12:17:45,308 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T12:17:45,309 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-19T12:17:45,354 DEBUG [hconnection-0x5525568c-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:17:45,356 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41218, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:45,370 INFO [master/af314c41f984:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-19T12:17:45,370 INFO [master/af314c41f984:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732018725370 2024-11-19T12:17:45,370 INFO [master/af314c41f984:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732018785370 2024-11-19T12:17:45,370 INFO [master/af314c41f984:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 61 msec 2024-11-19T12:17:45,399 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=af314c41f984,37977,1732018660782-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:45,399 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=af314c41f984,37977,1732018660782-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:45,400 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=af314c41f984,37977,1732018660782-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:45,401 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-af314c41f984:37977, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:45,402 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T12:17:45,408 DEBUG [master/af314c41f984:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-19T12:17:45,410 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-19T12:17:45,411 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T12:17:45,417 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-19T12:17:45,421 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:17:45,422 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:45,424 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:17:45,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741835_1011 (size=358) 2024-11-19T12:17:45,441 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => dec5b3d3b76c616b09a7c531fa488ba4, NAME => 'hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22 2024-11-19T12:17:45,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741836_1012 (size=42) 2024-11-19T12:17:45,858 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:17:45,858 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing dec5b3d3b76c616b09a7c531fa488ba4, disabling compactions & flushes 2024-11-19T12:17:45,858 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 2024-11-19T12:17:45,858 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 2024-11-19T12:17:45,859 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 
after waiting 0 ms 2024-11-19T12:17:45,859 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 2024-11-19T12:17:45,859 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 2024-11-19T12:17:45,859 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for dec5b3d3b76c616b09a7c531fa488ba4: 2024-11-19T12:17:45,861 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:17:45,868 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732018665862"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732018665862"}]},"ts":"1732018665862"} 2024-11-19T12:17:45,893 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-19T12:17:45,895 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:17:45,898 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018665895"}]},"ts":"1732018665895"} 2024-11-19T12:17:45,903 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-19T12:17:45,910 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=dec5b3d3b76c616b09a7c531fa488ba4, ASSIGN}] 2024-11-19T12:17:45,912 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=dec5b3d3b76c616b09a7c531fa488ba4, ASSIGN 2024-11-19T12:17:45,914 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=dec5b3d3b76c616b09a7c531fa488ba4, ASSIGN; state=OFFLINE, location=af314c41f984,36047,1732018661455; forceNewPlan=false, retain=false 2024-11-19T12:17:46,065 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=dec5b3d3b76c616b09a7c531fa488ba4, regionState=OPENING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:17:46,069 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure dec5b3d3b76c616b09a7c531fa488ba4, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:17:46,225 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:46,233 INFO [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 2024-11-19T12:17:46,233 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => dec5b3d3b76c616b09a7c531fa488ba4, NAME => 'hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:17:46,234 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace dec5b3d3b76c616b09a7c531fa488ba4 2024-11-19T12:17:46,234 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:17:46,234 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for dec5b3d3b76c616b09a7c531fa488ba4 2024-11-19T12:17:46,234 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for dec5b3d3b76c616b09a7c531fa488ba4 2024-11-19T12:17:46,237 INFO [StoreOpener-dec5b3d3b76c616b09a7c531fa488ba4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region dec5b3d3b76c616b09a7c531fa488ba4 2024-11-19T12:17:46,240 INFO [StoreOpener-dec5b3d3b76c616b09a7c531fa488ba4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dec5b3d3b76c616b09a7c531fa488ba4 columnFamilyName info 2024-11-19T12:17:46,240 DEBUG [StoreOpener-dec5b3d3b76c616b09a7c531fa488ba4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:46,241 INFO [StoreOpener-dec5b3d3b76c616b09a7c531fa488ba4-1 {}] regionserver.HStore(327): Store=dec5b3d3b76c616b09a7c531fa488ba4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:17:46,242 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/namespace/dec5b3d3b76c616b09a7c531fa488ba4 2024-11-19T12:17:46,243 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/namespace/dec5b3d3b76c616b09a7c531fa488ba4 2024-11-19T12:17:46,247 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for dec5b3d3b76c616b09a7c531fa488ba4 2024-11-19T12:17:46,250 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/namespace/dec5b3d3b76c616b09a7c531fa488ba4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:17:46,251 INFO [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened dec5b3d3b76c616b09a7c531fa488ba4; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60848462, jitterRate=-0.09328725934028625}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T12:17:46,253 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for dec5b3d3b76c616b09a7c531fa488ba4: 2024-11-19T12:17:46,255 INFO [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4., pid=6, masterSystemTime=1732018666225 2024-11-19T12:17:46,259 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 2024-11-19T12:17:46,260 INFO [RS_OPEN_PRIORITY_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 
2024-11-19T12:17:46,261 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=dec5b3d3b76c616b09a7c531fa488ba4, regionState=OPEN, openSeqNum=2, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:17:46,269 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T12:17:46,271 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure dec5b3d3b76c616b09a7c531fa488ba4, server=af314c41f984,36047,1732018661455 in 196 msec 2024-11-19T12:17:46,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T12:17:46,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=dec5b3d3b76c616b09a7c531fa488ba4, ASSIGN in 361 msec 2024-11-19T12:17:46,276 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:17:46,276 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018666276"}]},"ts":"1732018666276"} 2024-11-19T12:17:46,279 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-19T12:17:46,284 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:17:46,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 872 msec 2024-11-19T12:17:46,322 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-19T12:17:46,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-19T12:17:46,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:46,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:17:46,359 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-19T12:17:46,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-19T12:17:46,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 28 msec 2024-11-19T12:17:46,395 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-19T12:17:46,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-19T12:17:46,415 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 19 msec 2024-11-19T12:17:46,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-19T12:17:46,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-19T12:17:46,426 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.896sec 2024-11-19T12:17:46,428 INFO [master/af314c41f984:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T12:17:46,430 INFO [master/af314c41f984:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T12:17:46,431 INFO [master/af314c41f984:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T12:17:46,432 INFO [master/af314c41f984:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T12:17:46,432 INFO [master/af314c41f984:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T12:17:46,433 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=af314c41f984,37977,1732018660782-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:17:46,434 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=af314c41f984,37977,1732018660782-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T12:17:46,442 DEBUG [master/af314c41f984:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-19T12:17:46,443 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T12:17:46,443 INFO [master/af314c41f984:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=af314c41f984,37977,1732018660782-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T12:17:46,513 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63dfbe60 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5fd8052b 2024-11-19T12:17:46,514 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-19T12:17:46,521 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@322810c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:46,525 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-19T12:17:46,525 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-19T12:17:46,535 DEBUG [hconnection-0x320da16c-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:17:46,544 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:46,555 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=af314c41f984,37977,1732018660782 2024-11-19T12:17:46,573 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=148, ProcessCount=11, AvailableMemoryMB=2615 2024-11-19T12:17:46,604 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:17:46,611 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:17:46,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-19T12:17:46,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:17:46,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-19T12:17:46,630 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:17:46,631 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:46,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-19T12:17:46,633 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:17:46,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-19T12:17:46,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741837_1013 (size=960) 2024-11-19T12:17:46,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-19T12:17:46,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-19T12:17:47,055 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22 2024-11-19T12:17:47,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741838_1014 (size=53) 2024-11-19T12:17:47,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-19T12:17:47,466 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:17:47,466 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 1bd7f6746cebf2fb7e39737ab25d16cc, disabling compactions & flushes 2024-11-19T12:17:47,466 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:47,466 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:47,466 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. after waiting 0 ms 2024-11-19T12:17:47,466 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:47,466 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:47,466 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:47,468 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:17:47,468 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732018667468"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732018667468"}]},"ts":"1732018667468"} 2024-11-19T12:17:47,471 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-19T12:17:47,473 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:17:47,473 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018667473"}]},"ts":"1732018667473"} 2024-11-19T12:17:47,475 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-19T12:17:47,480 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1bd7f6746cebf2fb7e39737ab25d16cc, ASSIGN}] 2024-11-19T12:17:47,481 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1bd7f6746cebf2fb7e39737ab25d16cc, ASSIGN 2024-11-19T12:17:47,483 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=1bd7f6746cebf2fb7e39737ab25d16cc, ASSIGN; state=OFFLINE, location=af314c41f984,36047,1732018661455; forceNewPlan=false, retain=false 2024-11-19T12:17:47,633 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=1bd7f6746cebf2fb7e39737ab25d16cc, regionState=OPENING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:17:47,637 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:17:47,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-19T12:17:47,790 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:47,796 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:47,797 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:17:47,797 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:47,797 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:17:47,797 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:47,797 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:47,800 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:47,802 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:17:47,803 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1bd7f6746cebf2fb7e39737ab25d16cc columnFamilyName A 2024-11-19T12:17:47,803 DEBUG [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:47,804 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] regionserver.HStore(327): Store=1bd7f6746cebf2fb7e39737ab25d16cc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:17:47,804 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:47,806 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:17:47,806 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1bd7f6746cebf2fb7e39737ab25d16cc columnFamilyName B 2024-11-19T12:17:47,806 DEBUG [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:47,807 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] regionserver.HStore(327): Store=1bd7f6746cebf2fb7e39737ab25d16cc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:17:47,807 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:47,809 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:17:47,809 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1bd7f6746cebf2fb7e39737ab25d16cc columnFamilyName C 2024-11-19T12:17:47,809 DEBUG [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:17:47,810 INFO [StoreOpener-1bd7f6746cebf2fb7e39737ab25d16cc-1 {}] regionserver.HStore(327): Store=1bd7f6746cebf2fb7e39737ab25d16cc/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:17:47,810 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:47,811 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:47,812 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:47,814 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:17:47,816 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:47,819 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:17:47,820 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 1bd7f6746cebf2fb7e39737ab25d16cc; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59184032, jitterRate=-0.11808919906616211}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:17:47,821 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:47,822 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., pid=11, masterSystemTime=1732018667790 2024-11-19T12:17:47,825 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:47,825 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:47,826 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=1bd7f6746cebf2fb7e39737ab25d16cc, regionState=OPEN, openSeqNum=2, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:17:47,832 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-19T12:17:47,832 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 in 192 msec 2024-11-19T12:17:47,835 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-19T12:17:47,835 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1bd7f6746cebf2fb7e39737ab25d16cc, ASSIGN in 352 msec 2024-11-19T12:17:47,836 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:17:47,836 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018667836"}]},"ts":"1732018667836"} 2024-11-19T12:17:47,838 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-19T12:17:47,841 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:17:47,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2150 sec 2024-11-19T12:17:48,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-19T12:17:48,758 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-19T12:17:48,763 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6be4168e to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ed9b166 2024-11-19T12:17:48,767 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6539e770, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:48,769 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:17:48,772 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:48,774 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:17:48,777 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45454, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:17:48,784 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05038857 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@27c80704 2024-11-19T12:17:48,788 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e85caf7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:48,789 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4aba57ed to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2dad8999 2024-11-19T12:17:48,792 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27f597d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:48,794 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x629b91f8 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18de28d7 2024-11-19T12:17:48,796 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@139e2f4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:48,797 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62b16227 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b406636 2024-11-19T12:17:48,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f12960, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:48,802 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53f30e40 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7915562a 2024-11-19T12:17:48,805 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7523ca41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:48,806 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x691cbc80 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@502730d9 2024-11-19T12:17:48,809 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@763ae64, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:48,810 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62cfc6db to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b8793a3 2024-11-19T12:17:48,813 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@118d1c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:48,814 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08b52656 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@71209fad 2024-11-19T12:17:48,818 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@380d8ec5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:48,819 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x634dc49c to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1665e2af 2024-11-19T12:17:48,823 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42ca2d00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:17:48,828 DEBUG [hconnection-0x48af454-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:17:48,828 DEBUG [hconnection-0x7b5d524b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:17:48,829 DEBUG [hconnection-0xb857c1a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:17:48,830 DEBUG [hconnection-0x34e8847a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:17:48,831 DEBUG [hconnection-0x2ce6d49-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:17:48,832 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:48,832 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:48,832 DEBUG [hconnection-0x5e621c7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:17:48,832 DEBUG [hconnection-0x59c3390f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-19T12:17:48,833 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41250, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:48,834 DEBUG [hconnection-0x4a22ee9a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:17:48,834 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:17:48,835 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:48,835 DEBUG [hconnection-0x624e92fc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:17:48,840 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:48,841 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41280, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:48,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-19T12:17:48,843 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41294, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:48,845 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:17:48,846 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:17:48,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-19T12:17:48,848 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:17:48,849 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41308, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:48,857 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41314, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:17:48,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:17:48,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:48,934 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:48,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:48,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:48,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:48,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:48,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:48,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-19T12:17:49,010 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:49,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-19T12:17:49,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:49,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:49,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:49,025 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:49,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:49,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/783c515d094a4e2fb459ce8ff150d198 is 50, key is test_row_0/A:col10/1732018668913/Put/seqid=0 2024-11-19T12:17:49,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:49,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741839_1015 (size=16681) 2024-11-19T12:17:49,087 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/783c515d094a4e2fb459ce8ff150d198 2024-11-19T12:17:49,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018729081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018729085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018729082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018729086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018729088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-19T12:17:49,200 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:49,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-19T12:17:49,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:49,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:49,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:49,210 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:49,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:49,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:49,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/26a15a033f314009b8b3785cdcc2f02c is 50, key is test_row_0/B:col10/1732018668913/Put/seqid=0 2024-11-19T12:17:49,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018729248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018729249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018729250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018729250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018729251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741840_1016 (size=12001) 2024-11-19T12:17:49,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/26a15a033f314009b8b3785cdcc2f02c 2024-11-19T12:17:49,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/1f73f7ddbb594cef974bcc826565a7f7 is 50, key is test_row_0/C:col10/1732018668913/Put/seqid=0 2024-11-19T12:17:49,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741841_1017 (size=12001) 2024-11-19T12:17:49,369 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:49,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-19T12:17:49,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:49,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:49,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:49,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:49,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:49,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:49,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-19T12:17:49,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018729457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018729458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:49,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018729461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018729462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:49,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:49,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018729463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:49,532 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455
2024-11-19T12:17:49,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-11-19T12:17:49,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:49,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing
2024-11-19T12:17:49,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:49,533 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13
java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:49,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13
java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:49,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=13
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:49,690 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455
2024-11-19T12:17:49,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-11-19T12:17:49,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:49,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing
2024-11-19T12:17:49,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:49,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13
java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:49,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13
java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:49,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=13
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:49,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/1f73f7ddbb594cef974bcc826565a7f7
2024-11-19T12:17:49,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/783c515d094a4e2fb459ce8ff150d198 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/783c515d094a4e2fb459ce8ff150d198
2024-11-19T12:17:49,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/783c515d094a4e2fb459ce8ff150d198, entries=250, sequenceid=14, filesize=16.3 K
2024-11-19T12:17:49,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/26a15a033f314009b8b3785cdcc2f02c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/26a15a033f314009b8b3785cdcc2f02c
2024-11-19T12:17:49,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/26a15a033f314009b8b3785cdcc2f02c, entries=150, sequenceid=14, filesize=11.7 K
2024-11-19T12:17:49,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/1f73f7ddbb594cef974bcc826565a7f7 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/1f73f7ddbb594cef974bcc826565a7f7
2024-11-19T12:17:49,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018729766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:49,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018729766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:49,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018729770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:49,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018729770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:49,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:49,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018729770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:49,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/1f73f7ddbb594cef974bcc826565a7f7, entries=150, sequenceid=14, filesize=11.7 K
2024-11-19T12:17:49,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 1bd7f6746cebf2fb7e39737ab25d16cc in 873ms, sequenceid=14, compaction requested=false
2024-11-19T12:17:49,792 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees'
2024-11-19T12:17:49,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc:
2024-11-19T12:17:49,845 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455
2024-11-19T12:17:49,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-11-19T12:17:49,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:49,847 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-11-19T12:17:49,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A
2024-11-19T12:17:49,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:49,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B
2024-11-19T12:17:49,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:49,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C
2024-11-19T12:17:49,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:49,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/74505db564fe4a98991f64e751d8dcad is 50, key is test_row_0/A:col10/1732018669079/Put/seqid=0
2024-11-19T12:17:49,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741842_1018 (size=12001)
2024-11-19T12:17:49,880 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/74505db564fe4a98991f64e751d8dcad
2024-11-19T12:17:49,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/1cf15817c24d4032b362bd444e91f4e1 is 50, key is test_row_0/B:col10/1732018669079/Put/seqid=0
2024-11-19T12:17:49,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741843_1019 (size=12001)
2024-11-19T12:17:49,932 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/1cf15817c24d4032b362bd444e91f4e1
2024-11-19T12:17:49,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-11-19T12:17:49,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b39f1927423742efacd940c933faa1bd is 50, key is test_row_0/C:col10/1732018669079/Put/seqid=0
2024-11-19T12:17:49,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741844_1020 (size=12001)
2024-11-19T12:17:50,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc
2024-11-19T12:17:50,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing
2024-11-19T12:17:50,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018730296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018730300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018730300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018730300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018730302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,372 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b39f1927423742efacd940c933faa1bd
2024-11-19T12:17:50,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/74505db564fe4a98991f64e751d8dcad as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/74505db564fe4a98991f64e751d8dcad
2024-11-19T12:17:50,402 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/74505db564fe4a98991f64e751d8dcad, entries=150, sequenceid=38, filesize=11.7 K
2024-11-19T12:17:50,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/1cf15817c24d4032b362bd444e91f4e1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1cf15817c24d4032b362bd444e91f4e1
2024-11-19T12:17:50,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018730410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018730410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018730411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018730405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018730412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,419 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1cf15817c24d4032b362bd444e91f4e1, entries=150, sequenceid=38, filesize=11.7 K
2024-11-19T12:17:50,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b39f1927423742efacd940c933faa1bd as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b39f1927423742efacd940c933faa1bd
2024-11-19T12:17:50,438 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b39f1927423742efacd940c933faa1bd, entries=150, sequenceid=38, filesize=11.7 K
2024-11-19T12:17:50,440 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 1bd7f6746cebf2fb7e39737ab25d16cc in 594ms, sequenceid=38, compaction requested=false
2024-11-19T12:17:50,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc:
2024-11-19T12:17:50,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:50,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-19T12:17:50,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-19T12:17:50,447 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-19T12:17:50,447 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5940 sec 2024-11-19T12:17:50,452 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.6120 sec 2024-11-19T12:17:50,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:50,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-19T12:17:50,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:50,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:50,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:50,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:50,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:50,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:50,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/d97c68e66de44def9477ac926d6bf4a9 is 50, key is test_row_0/A:col10/1732018670621/Put/seqid=0 2024-11-19T12:17:50,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741845_1021 (size=12001) 2024-11-19T12:17:50,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/d97c68e66de44def9477ac926d6bf4a9 2024-11-19T12:17:50,675 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/aa81ccf311ab4d1e9c2cb93da0c69d1d is 50, key is test_row_0/B:col10/1732018670621/Put/seqid=0 2024-11-19T12:17:50,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018730668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018730669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018730677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018730679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018730668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741846_1022 (size=12001)
2024-11-19T12:17:50,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/aa81ccf311ab4d1e9c2cb93da0c69d1d
2024-11-19T12:17:50,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b6d8baadeac24b21b05c31f7f6121db6 is 50, key is test_row_0/C:col10/1732018670621/Put/seqid=0
2024-11-19T12:17:50,729 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-19T12:17:50,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741847_1023 (size=12001)
2024-11-19T12:17:50,742 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b6d8baadeac24b21b05c31f7f6121db6
2024-11-19T12:17:50,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/d97c68e66de44def9477ac926d6bf4a9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/d97c68e66de44def9477ac926d6bf4a9
2024-11-19T12:17:50,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/d97c68e66de44def9477ac926d6bf4a9, entries=150, sequenceid=52, filesize=11.7 K
2024-11-19T12:17:50,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018730781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018730781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/aa81ccf311ab4d1e9c2cb93da0c69d1d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/aa81ccf311ab4d1e9c2cb93da0c69d1d
2024-11-19T12:17:50,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018730785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018730784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:50,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018730794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:50,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/aa81ccf311ab4d1e9c2cb93da0c69d1d, entries=150, sequenceid=52, filesize=11.7 K
2024-11-19T12:17:50,807 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-19T12:17:50,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b6d8baadeac24b21b05c31f7f6121db6 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b6d8baadeac24b21b05c31f7f6121db6
2024-11-19T12:17:50,810 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-11-19T12:17:50,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b6d8baadeac24b21b05c31f7f6121db6, entries=150, sequenceid=52, filesize=11.7 K
2024-11-19T12:17:50,825 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders
2024-11-19T12:17:50,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 1bd7f6746cebf2fb7e39737ab25d16cc in 203ms, sequenceid=52, compaction requested=true
2024-11-19T12:17:50,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc:
2024-11-19T12:17:50,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1
2024-11-19T12:17:50,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:17:50,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2
2024-11-19T12:17:50,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:17:50,830 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T12:17:50,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3
2024-11-19T12:17:50,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-19T12:17:50,831 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T12:17:50,834 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T12:17:50,836 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files)
2024-11-19T12:17:50,836 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:50,837 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/26a15a033f314009b8b3785cdcc2f02c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1cf15817c24d4032b362bd444e91f4e1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/aa81ccf311ab4d1e9c2cb93da0c69d1d] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=35.2 K
2024-11-19T12:17:50,838 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T12:17:50,838 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 26a15a033f314009b8b3785cdcc2f02c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732018668843
2024-11-19T12:17:50,838 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files)
2024-11-19T12:17:50,838 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:50,839 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/783c515d094a4e2fb459ce8ff150d198, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/74505db564fe4a98991f64e751d8dcad, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/d97c68e66de44def9477ac926d6bf4a9] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=39.7 K
2024-11-19T12:17:50,840 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cf15817c24d4032b362bd444e91f4e1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732018669079
2024-11-19T12:17:50,840 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 783c515d094a4e2fb459ce8ff150d198, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732018668843
2024-11-19T12:17:50,841 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting aa81ccf311ab4d1e9c2cb93da0c69d1d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018670296
2024-11-19T12:17:50,843 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74505db564fe4a98991f64e751d8dcad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732018669079
2024-11-19T12:17:50,844 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d97c68e66de44def9477ac926d6bf4a9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018670296
2024-11-19T12:17:50,888 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#10 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:17:50,889 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/d01fb2113adc4b71b71dd560fae54dbc is 50, key is test_row_0/B:col10/1732018670621/Put/seqid=0
2024-11-19T12:17:50,893 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#9 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:17:50,894 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/ceb87a3d466b4f358424f2bfbbcc4b51 is 50, key is test_row_0/A:col10/1732018670621/Put/seqid=0
2024-11-19T12:17:50,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741848_1024 (size=12104)
2024-11-19T12:17:50,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741849_1025 (size=12104)
2024-11-19T12:17:50,941 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/d01fb2113adc4b71b71dd560fae54dbc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d01fb2113adc4b71b71dd560fae54dbc
2024-11-19T12:17:50,941 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/ceb87a3d466b4f358424f2bfbbcc4b51 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/ceb87a3d466b4f358424f2bfbbcc4b51
2024-11-19T12:17:50,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-11-19T12:17:50,957 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed
2024-11-19T12:17:50,965 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into d01fb2113adc4b71b71dd560fae54dbc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T12:17:50,966 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc:
2024-11-19T12:17:50,966 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-19T12:17:50,966 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=13, startTime=1732018670830; duration=0sec
2024-11-19T12:17:50,966 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into ceb87a3d466b4f358424f2bfbbcc4b51(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T12:17:50,966 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc:
2024-11-19T12:17:50,966 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=13, startTime=1732018670829; duration=0sec
2024-11-19T12:17:50,966 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-19T12:17:50,966 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B
2024-11-19T12:17:50,966 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:17:50,966 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A
2024-11-19T12:17:50,966 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T12:17:50,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees
2024-11-19T12:17:50,969 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-19T12:17:50,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-11-19T12:17:50,970 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T12:17:50,971 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T12:17:50,973 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T12:17:50,973 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files)
2024-11-19T12:17:50,973 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:50,974 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/1f73f7ddbb594cef974bcc826565a7f7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b39f1927423742efacd940c933faa1bd, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b6d8baadeac24b21b05c31f7f6121db6] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=35.2 K
2024-11-19T12:17:50,975 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f73f7ddbb594cef974bcc826565a7f7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732018668843
2024-11-19T12:17:50,977 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b39f1927423742efacd940c933faa1bd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732018669079
2024-11-19T12:17:50,978 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b6d8baadeac24b21b05c31f7f6121db6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018670296
2024-11-19T12:17:50,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc
2024-11-19T12:17:50,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-11-19T12:17:51,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A
2024-11-19T12:17:51,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:51,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B
2024-11-19T12:17:51,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:51,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C
2024-11-19T12:17:51,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:51,012 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#11 average throughput is 0.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:17:51,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/5b068f700dab4b9bbad0a32d44ae2e10 is 50, key is test_row_0/A:col10/1732018670990/Put/seqid=0
2024-11-19T12:17:51,018 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/5216d19ebfe94fcf8cf4c0fc129c25ce is 50, key is test_row_0/C:col10/1732018670621/Put/seqid=0
2024-11-19T12:17:51,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:51,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018731023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:51,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:51,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018731026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:51,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:51,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018731026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:51,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:51,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018731028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:51,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:51,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018731030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:51,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741850_1026 (size=12001)
2024-11-19T12:17:51,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/5b068f700dab4b9bbad0a32d44ae2e10
2024-11-19T12:17:51,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741851_1027 (size=12104)
2024-11-19T12:17:51,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-11-19T12:17:51,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6d0ea21eff2a423fb9698c569926954a is 50, key is test_row_0/B:col10/1732018670990/Put/seqid=0
2024-11-19T12:17:51,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741852_1028 (size=12001)
2024-11-19T12:17:51,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6d0ea21eff2a423fb9698c569926954a
2024-11-19T12:17:51,123 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455
2024-11-19T12:17:51,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-11-19T12:17:51,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:51,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing
2024-11-19T12:17:51,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:51,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15
java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:51,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15
java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:51,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=15
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:51,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/381d13661c6d48afaa1f2bf29e554161 is 50, key is test_row_0/C:col10/1732018670990/Put/seqid=0
2024-11-19T12:17:51,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:51,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018731133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:51,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:51,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018731135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:51,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:51,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018731136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:51,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:51,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018731137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:51,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018731143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741853_1029 (size=12001) 2024-11-19T12:17:51,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/381d13661c6d48afaa1f2bf29e554161 2024-11-19T12:17:51,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/5b068f700dab4b9bbad0a32d44ae2e10 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5b068f700dab4b9bbad0a32d44ae2e10 2024-11-19T12:17:51,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5b068f700dab4b9bbad0a32d44ae2e10, entries=150, sequenceid=79, filesize=11.7 K 2024-11-19T12:17:51,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6d0ea21eff2a423fb9698c569926954a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6d0ea21eff2a423fb9698c569926954a 2024-11-19T12:17:51,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6d0ea21eff2a423fb9698c569926954a, entries=150, sequenceid=79, filesize=11.7 K 2024-11-19T12:17:51,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/381d13661c6d48afaa1f2bf29e554161 as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/381d13661c6d48afaa1f2bf29e554161 2024-11-19T12:17:51,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/381d13661c6d48afaa1f2bf29e554161, entries=150, sequenceid=79, filesize=11.7 K 2024-11-19T12:17:51,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 1bd7f6746cebf2fb7e39737ab25d16cc in 223ms, sequenceid=79, compaction requested=false 2024-11-19T12:17:51,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:51,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T12:17:51,236 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T12:17:51,239 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-19T12:17:51,240 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-19T12:17:51,241 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:17:51,241 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T12:17:51,242 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T12:17:51,242 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T12:17:51,243 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-19T12:17:51,243 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-19T12:17:51,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-19T12:17:51,281 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:51,282 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote 
procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-19T12:17:51,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:51,282 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:17:51,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:51,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:51,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:51,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:51,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:51,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:51,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/6b5f00b248dd439ab392c243b2822e8d is 50, key is test_row_0/A:col10/1732018671022/Put/seqid=0 2024-11-19T12:17:51,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741854_1030 (size=12001) 2024-11-19T12:17:51,331 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/6b5f00b248dd439ab392c243b2822e8d 2024-11-19T12:17:51,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
as already flushing 2024-11-19T12:17:51,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:51,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/010a0990052b49f6b6a77762ff174ff8 is 50, key is test_row_0/B:col10/1732018671022/Put/seqid=0 2024-11-19T12:17:51,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741855_1031 (size=12001) 2024-11-19T12:17:51,385 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/010a0990052b49f6b6a77762ff174ff8 2024-11-19T12:17:51,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018731387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018731389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018731398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018731399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018731401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/8b38b008ccf744c08ef2a545024bd0e9 is 50, key is test_row_0/C:col10/1732018671022/Put/seqid=0 2024-11-19T12:17:51,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741856_1032 (size=12001) 2024-11-19T12:17:51,428 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/8b38b008ccf744c08ef2a545024bd0e9 2024-11-19T12:17:51,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/6b5f00b248dd439ab392c243b2822e8d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6b5f00b248dd439ab392c243b2822e8d 2024-11-19T12:17:51,461 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6b5f00b248dd439ab392c243b2822e8d, entries=150, sequenceid=90, filesize=11.7 K 2024-11-19T12:17:51,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/010a0990052b49f6b6a77762ff174ff8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/010a0990052b49f6b6a77762ff174ff8 2024-11-19T12:17:51,473 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/5216d19ebfe94fcf8cf4c0fc129c25ce as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/5216d19ebfe94fcf8cf4c0fc129c25ce 2024-11-19T12:17:51,477 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/010a0990052b49f6b6a77762ff174ff8, entries=150, sequenceid=90, filesize=11.7 K 2024-11-19T12:17:51,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/8b38b008ccf744c08ef2a545024bd0e9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/8b38b008ccf744c08ef2a545024bd0e9 2024-11-19T12:17:51,490 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into 5216d19ebfe94fcf8cf4c0fc129c25ce(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:17:51,490 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:51,490 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=13, startTime=1732018670830; duration=0sec 2024-11-19T12:17:51,490 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:51,490 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:17:51,495 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/8b38b008ccf744c08ef2a545024bd0e9, entries=150, sequenceid=90, filesize=11.7 K 2024-11-19T12:17:51,501 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 1bd7f6746cebf2fb7e39737ab25d16cc in 219ms, sequenceid=90, compaction requested=true 2024-11-19T12:17:51,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 
1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:51,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:51,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-19T12:17:51,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-19T12:17:51,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-19T12:17:51,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 532 msec 2024-11-19T12:17:51,511 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 544 msec 2024-11-19T12:17:51,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:51,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-19T12:17:51,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:51,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:51,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:51,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:51,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:51,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:51,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/898b9cf3d29047f58a37ba2422954dab is 50, key is test_row_0/A:col10/1732018671394/Put/seqid=0 2024-11-19T12:17:51,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018731518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018731522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018731530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018731531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018731531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741857_1033 (size=12001) 2024-11-19T12:17:51,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/898b9cf3d29047f58a37ba2422954dab 2024-11-19T12:17:51,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/4fac8d5a36134c948cd79323755ff097 is 50, key is test_row_0/B:col10/1732018671394/Put/seqid=0 2024-11-19T12:17:51,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-19T12:17:51,575 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-19T12:17:51,579 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:17:51,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-19T12:17:51,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 
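For context on the flush traffic above: the completed procId 14 and the newly stored pid=16 FlushTableProcedure are the server-side halves of the Admin.flush calls the TestAcidGuarantees client issues between write batches. A minimal client-side sketch of that call follows; the class name and the quorum setting are illustrative assumptions, and only the table name and the flush API itself come from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "localhost"); // assumed; the logged test runs against a mini cluster
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Synchronous table flush; the master executes it as a FlushTableProcedure
                // with one FlushRegionProcedure child per region (pid=14 -> pid=15 earlier
                // in the log, pid=16 stored here).
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }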
2024-11-19T12:17:51,583 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:17:51,584 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:17:51,585 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:17:51,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741858_1034 (size=12001) 2024-11-19T12:17:51,604 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/4fac8d5a36134c948cd79323755ff097 2024-11-19T12:17:51,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/75cdb9f3f0e349b6b541f482e9433b70 is 50, key is test_row_0/C:col10/1732018671394/Put/seqid=0 2024-11-19T12:17:51,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018731633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018731634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018731637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018731638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018731639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741859_1035 (size=12001) 2024-11-19T12:17:51,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/75cdb9f3f0e349b6b541f482e9433b70 2024-11-19T12:17:51,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/898b9cf3d29047f58a37ba2422954dab as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/898b9cf3d29047f58a37ba2422954dab 2024-11-19T12:17:51,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/898b9cf3d29047f58a37ba2422954dab, entries=150, sequenceid=120, filesize=11.7 K 2024-11-19T12:17:51,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/4fac8d5a36134c948cd79323755ff097 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4fac8d5a36134c948cd79323755ff097 2024-11-19T12:17:51,684 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-19T12:17:51,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4fac8d5a36134c948cd79323755ff097, entries=150, sequenceid=120, filesize=11.7 K 2024-11-19T12:17:51,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/75cdb9f3f0e349b6b541f482e9433b70 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/75cdb9f3f0e349b6b541f482e9433b70 2024-11-19T12:17:51,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/75cdb9f3f0e349b6b541f482e9433b70, entries=150, sequenceid=120, filesize=11.7 K 2024-11-19T12:17:51,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=40.25 KB/41220 for 1bd7f6746cebf2fb7e39737ab25d16cc in 206ms, sequenceid=120, compaction requested=true 2024-11-19T12:17:51,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:51,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:17:51,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:51,720 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:17:51,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:17:51,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:51,720 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:17:51,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:17:51,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:51,726 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 
48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:17:51,726 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files) 2024-11-19T12:17:51,726 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:51,726 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d01fb2113adc4b71b71dd560fae54dbc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6d0ea21eff2a423fb9698c569926954a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/010a0990052b49f6b6a77762ff174ff8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4fac8d5a36134c948cd79323755ff097] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=47.0 K 2024-11-19T12:17:51,728 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:17:51,728 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files) 2024-11-19T12:17:51,728 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:51,728 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/ceb87a3d466b4f358424f2bfbbcc4b51, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5b068f700dab4b9bbad0a32d44ae2e10, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6b5f00b248dd439ab392c243b2822e8d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/898b9cf3d29047f58a37ba2422954dab] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=47.0 K 2024-11-19T12:17:51,729 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d01fb2113adc4b71b71dd560fae54dbc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018670296 2024-11-19T12:17:51,729 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting ceb87a3d466b4f358424f2bfbbcc4b51, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018670296 2024-11-19T12:17:51,730 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d0ea21eff2a423fb9698c569926954a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732018670674 2024-11-19T12:17:51,731 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 010a0990052b49f6b6a77762ff174ff8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732018671004 2024-11-19T12:17:51,731 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b068f700dab4b9bbad0a32d44ae2e10, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732018670674 2024-11-19T12:17:51,732 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fac8d5a36134c948cd79323755ff097, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732018671394 2024-11-19T12:17:51,732 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b5f00b248dd439ab392c243b2822e8d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732018671004 2024-11-19T12:17:51,734 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 898b9cf3d29047f58a37ba2422954dab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732018671394 2024-11-19T12:17:51,740 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:51,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 
2024-11-19T12:17:51,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:51,741 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-19T12:17:51,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:51,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:51,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:51,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:51,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:51,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:51,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/1783bfa2412044af943fd8f8eed7a285 is 50, key is test_row_1/A:col10/1732018671523/Put/seqid=0 2024-11-19T12:17:51,766 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#22 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:51,767 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/1d7570e0543d4c1482bf693fc3e91a85 is 50, key is test_row_0/B:col10/1732018671394/Put/seqid=0 2024-11-19T12:17:51,772 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#23 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:51,773 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/3c16fa9d72ba4135983424e521e9ced1 is 50, key is test_row_0/A:col10/1732018671394/Put/seqid=0 2024-11-19T12:17:51,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741860_1036 (size=9657) 2024-11-19T12:17:51,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741862_1038 (size=12241) 2024-11-19T12:17:51,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741861_1037 (size=12241) 2024-11-19T12:17:51,808 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/3c16fa9d72ba4135983424e521e9ced1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3c16fa9d72ba4135983424e521e9ced1 2024-11-19T12:17:51,812 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/1d7570e0543d4c1482bf693fc3e91a85 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1d7570e0543d4c1482bf693fc3e91a85 2024-11-19T12:17:51,824 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into 1d7570e0543d4c1482bf693fc3e91a85(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:17:51,824 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:51,824 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into 3c16fa9d72ba4135983424e521e9ced1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
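[Annotation] The flush and the follow-on minor compactions recorded above are driven by the server itself: the master's FlushRegionProcedure (pid=17) requests the flush, and MemStoreFlusher queues the compaction requests that ExploringCompactionPolicy then satisfies. For reference, the same operations can also be requested explicitly through the public Admin API. The sketch below is illustrative only; the class name is arbitrary, and it assumes a client configuration that points at this test's mini-cluster and the TestAcidGuarantees table seen in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompact {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml on the classpath points at the (mini-)cluster.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Client-side equivalent of the flush the master procedure runs in this log.
      admin.flush(table);
      // Request a compaction of the flushed store files, like the
      // MemStoreFlusher-triggered requests above; the region server still
      // decides which files to pick via its compaction policy.
      admin.compact(table);
    }
  }
}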
2024-11-19T12:17:51,824 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:51,824 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=12, startTime=1732018671720; duration=0sec 2024-11-19T12:17:51,824 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=12, startTime=1732018671719; duration=0sec 2024-11-19T12:17:51,824 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:51,824 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B 2024-11-19T12:17:51,824 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:51,825 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A 2024-11-19T12:17:51,825 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:17:51,827 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:17:51,827 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files) 2024-11-19T12:17:51,827 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:51,827 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/5216d19ebfe94fcf8cf4c0fc129c25ce, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/381d13661c6d48afaa1f2bf29e554161, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/8b38b008ccf744c08ef2a545024bd0e9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/75cdb9f3f0e349b6b541f482e9433b70] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=47.0 K 2024-11-19T12:17:51,828 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 5216d19ebfe94fcf8cf4c0fc129c25ce, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018670296 2024-11-19T12:17:51,829 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 381d13661c6d48afaa1f2bf29e554161, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732018670674 2024-11-19T12:17:51,830 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b38b008ccf744c08ef2a545024bd0e9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732018671004 2024-11-19T12:17:51,831 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 75cdb9f3f0e349b6b541f482e9433b70, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732018671394 2024-11-19T12:17:51,855 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#24 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:51,856 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b613eba33c0b469ab4c68e3ac3c237d9 is 50, key is test_row_0/C:col10/1732018671394/Put/seqid=0 2024-11-19T12:17:51,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741863_1039 (size=12241) 2024-11-19T12:17:51,876 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b613eba33c0b469ab4c68e3ac3c237d9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b613eba33c0b469ab4c68e3ac3c237d9 2024-11-19T12:17:51,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-19T12:17:51,912 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into b613eba33c0b469ab4c68e3ac3c237d9(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:17:51,912 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:51,913 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=12, startTime=1732018671720; duration=0sec 2024-11-19T12:17:51,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:51,913 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:51,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:51,914 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:17:51,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018731952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018731953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018731954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018731955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:51,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:51,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018731955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018732060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018732063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018732063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018732063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018732064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-19T12:17:52,193 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/1783bfa2412044af943fd8f8eed7a285 2024-11-19T12:17:52,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/e08a7cbd6ce6481dbe21d46e598bff0b is 50, key is test_row_1/B:col10/1732018671523/Put/seqid=0 2024-11-19T12:17:52,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741864_1040 (size=9657) 2024-11-19T12:17:52,249 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/e08a7cbd6ce6481dbe21d46e598bff0b 2024-11-19T12:17:52,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018732264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018732268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018732270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018732268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018732270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/0f7246f61fee4bb283b30488cca79295 is 50, key is test_row_1/C:col10/1732018671523/Put/seqid=0 2024-11-19T12:17:52,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741865_1041 (size=9657) 2024-11-19T12:17:52,304 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/0f7246f61fee4bb283b30488cca79295 2024-11-19T12:17:52,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/1783bfa2412044af943fd8f8eed7a285 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/1783bfa2412044af943fd8f8eed7a285 2024-11-19T12:17:52,327 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/1783bfa2412044af943fd8f8eed7a285, entries=100, sequenceid=129, filesize=9.4 K 2024-11-19T12:17:52,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/e08a7cbd6ce6481dbe21d46e598bff0b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/e08a7cbd6ce6481dbe21d46e598bff0b 2024-11-19T12:17:52,341 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/e08a7cbd6ce6481dbe21d46e598bff0b, entries=100, sequenceid=129, filesize=9.4 K 2024-11-19T12:17:52,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/0f7246f61fee4bb283b30488cca79295 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/0f7246f61fee4bb283b30488cca79295 2024-11-19T12:17:52,356 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/0f7246f61fee4bb283b30488cca79295, entries=100, sequenceid=129, filesize=9.4 K 2024-11-19T12:17:52,358 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 1bd7f6746cebf2fb7e39737ab25d16cc in 617ms, sequenceid=129, compaction requested=false 2024-11-19T12:17:52,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:52,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
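[Annotation] The repeated RegionTooBusyException entries throughout this stretch of the log come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking limit (reported here as 512.0 K). By default that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and this test keeps it small so the writer threads outrun the flusher. The sketch below is a minimal illustration of a writer hitting that limit; the class name, the flush-size and multiplier values, and the retry handling are assumptions chosen for illustration, not values read from this run's configuration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemStorePressureWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server-side knobs behind the "Over memstore limit=512.0 K" messages.
    // They must be set in the cluster's configuration before the region
    // server starts; shown here only to document the relationship
    // (illustrative values: 128 KB flush size x multiplier 4 = 512 K).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      byte[] family = Bytes.toBytes("A"); // one of the column families (A/B/C) seen in the log
      for (int i = 0; i < 10_000; i++) {
        Put put = new Put(Bytes.toBytes("test_row_" + (i % 10)));
        put.addColumn(family, Bytes.toBytes("col10"), Bytes.toBytes("value-" + i));
        try {
          table.put(put);
        } catch (IOException e) {
          // Under memstore pressure the server answers with
          // RegionTooBusyException ("Over memstore limit=..."); the client
          // retries internally, so an exception surfacing here means those
          // retries were exhausted. Back off briefly and continue.
          Thread.sleep(100);
        }
      }
    }
  }
}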
2024-11-19T12:17:52,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-19T12:17:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-19T12:17:52,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-19T12:17:52,365 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 776 msec 2024-11-19T12:17:52,368 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 787 msec 2024-11-19T12:17:52,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:52,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-11-19T12:17:52,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:52,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:52,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:52,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:52,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:52,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:52,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018732606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018732606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018732609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018732612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/aa0e7458b7fc4b58bd7479cbe9cb487f is 50, key is test_row_0/A:col10/1732018671949/Put/seqid=0 2024-11-19T12:17:52,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018732622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741866_1042 (size=14541) 2024-11-19T12:17:52,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/aa0e7458b7fc4b58bd7479cbe9cb487f 2024-11-19T12:17:52,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/693d04812c8f471694adb0d658ca5d9d is 50, key is test_row_0/B:col10/1732018671949/Put/seqid=0 2024-11-19T12:17:52,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-19T12:17:52,689 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-19T12:17:52,692 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:17:52,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-19T12:17:52,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-19T12:17:52,695 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:17:52,696 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:17:52,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:17:52,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741867_1043 (size=12151) 2024-11-19T12:17:52,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018732714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018732714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018732718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018732718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018732730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-19T12:17:52,850 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:52,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:52,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:52,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:52,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:52,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:52,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:52,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:52,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018732918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018732920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018732921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018732922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:52,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018732937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:52,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-19T12:17:53,005 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:53,006 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:53,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:53,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,007 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:53,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/693d04812c8f471694adb0d658ca5d9d 2024-11-19T12:17:53,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/873eae9d835b4a80a292a247cfe22c87 is 50, key is test_row_0/C:col10/1732018671949/Put/seqid=0 2024-11-19T12:17:53,160 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:53,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741868_1044 (size=12151) 2024-11-19T12:17:53,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:53,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:53,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,162 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
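[Editorial note] The pid=19 failures above follow a single pattern: the master dispatches a FlushRegionProcedure while MemStoreFlusher.0 is still flushing the region, the callable logs "NOT flushing ... as already flushing" and reports an IOException, and the master re-dispatches until the in-flight flush completes, so these errors indicate retries rather than lost data. The earlier HBaseAdmin$TableFuture entry ("Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed") suggests the test driver issues these flushes through the public Admin API and waits for the procedure. A minimal sketch of that call, assuming default client configuration, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush every region of the table; in this log
            // that shows up as a FlushTableProcedure (pid=16, 18) fanning out
            // FlushRegionProcedure subprocedures (pid=17, 19).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}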
2024-11-19T12:17:53,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/873eae9d835b4a80a292a247cfe22c87 2024-11-19T12:17:53,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/aa0e7458b7fc4b58bd7479cbe9cb487f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/aa0e7458b7fc4b58bd7479cbe9cb487f 2024-11-19T12:17:53,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/aa0e7458b7fc4b58bd7479cbe9cb487f, entries=200, sequenceid=164, filesize=14.2 K 2024-11-19T12:17:53,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/693d04812c8f471694adb0d658ca5d9d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/693d04812c8f471694adb0d658ca5d9d 2024-11-19T12:17:53,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/693d04812c8f471694adb0d658ca5d9d, entries=150, sequenceid=164, filesize=11.9 K 2024-11-19T12:17:53,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/873eae9d835b4a80a292a247cfe22c87 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/873eae9d835b4a80a292a247cfe22c87 2024-11-19T12:17:53,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/873eae9d835b4a80a292a247cfe22c87, entries=150, sequenceid=164, filesize=11.9 K 2024-11-19T12:17:53,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=13.42 KB/13740 for 1bd7f6746cebf2fb7e39737ab25d16cc in 622ms, sequenceid=164, compaction requested=true 2024-11-19T12:17:53,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:53,227 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:53,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:17:53,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:53,228 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:53,229 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36439 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:53,230 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files) 2024-11-19T12:17:53,230 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,230 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3c16fa9d72ba4135983424e521e9ced1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/1783bfa2412044af943fd8f8eed7a285, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/aa0e7458b7fc4b58bd7479cbe9cb487f] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=35.6 K 2024-11-19T12:17:53,230 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:53,231 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files) 2024-11-19T12:17:53,231 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c16fa9d72ba4135983424e521e9ced1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732018671394 2024-11-19T12:17:53,231 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:53,231 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1d7570e0543d4c1482bf693fc3e91a85, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/e08a7cbd6ce6481dbe21d46e598bff0b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/693d04812c8f471694adb0d658ca5d9d] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=33.3 K 2024-11-19T12:17:53,232 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d7570e0543d4c1482bf693fc3e91a85, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732018671394 2024-11-19T12:17:53,232 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1783bfa2412044af943fd8f8eed7a285, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732018671523 2024-11-19T12:17:53,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:17:53,233 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa0e7458b7fc4b58bd7479cbe9cb487f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732018671949 2024-11-19T12:17:53,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:53,234 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting e08a7cbd6ce6481dbe21d46e598bff0b, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732018671523 2024-11-19T12:17:53,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:17:53,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:53,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:53,235 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 693d04812c8f471694adb0d658ca5d9d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732018671949 2024-11-19T12:17:53,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-19T12:17:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 
2024-11-19T12:17:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:53,251 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#30 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:53,252 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/4d1e8380e83a44c48ad306a454a3dbe9 is 50, key is test_row_0/A:col10/1732018671949/Put/seqid=0 2024-11-19T12:17:53,258 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#31 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:53,259 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/ee99d9253b1e45b99000aa8291857f21 is 50, key is test_row_0/B:col10/1732018671949/Put/seqid=0 2024-11-19T12:17:53,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/3cb386632e524b7f90494fa82f354c00 is 50, key is test_row_0/A:col10/1732018673231/Put/seqid=0 2024-11-19T12:17:53,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018733275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018733277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018733277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018733277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018733278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-19T12:17:53,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741870_1046 (size=12493) 2024-11-19T12:17:53,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741869_1045 (size=12493) 2024-11-19T12:17:53,317 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/ee99d9253b1e45b99000aa8291857f21 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/ee99d9253b1e45b99000aa8291857f21 2024-11-19T12:17:53,318 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:53,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:53,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
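[Editor's sketch] The repeated RegionTooBusyException warnings above come from HRegion.checkResources (HRegion.java:5067 in these traces) rejecting mutations while the region's memstore is over its 512 K blocking limit and the flush started earlier is still running. A minimal sketch of that admission check is below; the class, field and method names are simplified stand-ins for illustration, not the actual HRegion internals.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

// Minimal sketch of the admission check behind the warnings above: while the
// region's memstore is over its blocking limit, incoming mutations are refused
// with a retriable exception rather than queued, and callers are expected to
// back off until the in-flight flush frees memory. Names are stand-ins.
public class MemstorePressureSketch {

    static class RegionTooBusyException extends IOException {
        RegionTooBusyException(String msg) { super(msg); }
    }

    private final AtomicLong memstoreSize = new AtomicLong();
    private final long blockingMemstoreSize = 512 * 1024; // 512 K, as in the log

    void checkResources() throws RegionTooBusyException {
        if (memstoreSize.get() > blockingMemstoreSize) {
            throw new RegionTooBusyException(
                "Over memstore limit=" + blockingMemstoreSize + " bytes");
        }
    }

    void put(long mutationHeapSize) throws RegionTooBusyException {
        checkResources();                       // reject early under pressure
        memstoreSize.addAndGet(mutationHeapSize);
    }

    void flushed(long bytesFlushed) {
        memstoreSize.addAndGet(-bytesFlushed);  // flush completion relieves pressure
    }

    public static void main(String[] args) throws RegionTooBusyException {
        MemstorePressureSketch region = new MemstorePressureSketch();
        region.put(300 * 1024);   // accepted
        region.put(300 * 1024);   // accepted; memstore is now over the limit
        try {
            region.put(1024);     // rejected until a flush completes
        } catch (RegionTooBusyException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}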
2024-11-19T12:17:53,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:53,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:53,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
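[Editor's sketch] The pid=19 failure above follows a simple pattern: the master dispatches a remote flush, FlushRegionCallable finds the region already flushing ("NOT flushing ... as already flushing"), surfaces that as an IOException, and the master logs "Remote procedure failed" and re-dispatches the same pid later in this log. A rough sketch of that callable-side decision, using hypothetical types rather than the actual HBase classes:

import java.io.IOException;

// Rough sketch of the callable-side decision seen for pid=19: if the region is
// already flushing, the remote flush request cannot be satisfied now, so it
// fails with an IOException and the master retries the procedure later.
// The Region interface here is a hypothetical stand-in, not an HBase type.
public class FlushRegionCallableSketch {

    interface Region {
        boolean isFlushInProgress();
        void flush() throws IOException;
        String describe();
    }

    static void doCall(Region region) throws IOException {
        if (region.isFlushInProgress()) {
            // Mirrors "NOT flushing ... as already flushing" followed by
            // "Unable to complete flush ..." in the entries above.
            throw new IOException("Unable to complete flush " + region.describe());
        }
        region.flush();
    }
}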
2024-11-19T12:17:53,325 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/4d1e8380e83a44c48ad306a454a3dbe9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/4d1e8380e83a44c48ad306a454a3dbe9 2024-11-19T12:17:53,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741871_1047 (size=9757) 2024-11-19T12:17:53,332 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/3cb386632e524b7f90494fa82f354c00 2024-11-19T12:17:53,332 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into ee99d9253b1e45b99000aa8291857f21(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:17:53,332 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:53,332 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=13, startTime=1732018673228; duration=0sec 2024-11-19T12:17:53,332 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:53,332 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B 2024-11-19T12:17:53,333 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:53,336 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:53,336 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files) 2024-11-19T12:17:53,336 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
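[Editor's sketch] Both the flusher and the compactors above write each new HFile under the region's .tmp directory first and only then "commit" it into the store directory (the HRegionFileSystem(442) entries). A minimal sketch of that two-step pattern using the Hadoop FileSystem API is below; the paths are placeholders, error handling is reduced to one check, and real HBase routes this through HRegionFileSystem rather than calling rename directly.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch of the ".tmp then commit" pattern visible in the
// "Committing .../.tmp/A/<file> as .../A/<file>" entries: the new HFile is
// written under the region's .tmp directory and then moved into the store
// directory with a rename, so readers never observe a partially written file.
public class CommitStoreFileSketch {

    static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        Path dst = new Path(storeDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {   // rename is the single "commit" step
            throw new IOException("Failed to commit " + tmpFile + " as " + dst);
        }
        return dst;
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // Placeholder paths for illustration only.
        Path tmp = new Path("/data/default/TestAcidGuarantees/region/.tmp/A/examplefile");
        Path storeDir = new Path("/data/default/TestAcidGuarantees/region/A");
        System.out.println("committed to " + commit(fs, tmp, storeDir));
    }
}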
2024-11-19T12:17:53,337 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b613eba33c0b469ab4c68e3ac3c237d9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/0f7246f61fee4bb283b30488cca79295, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/873eae9d835b4a80a292a247cfe22c87] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=33.3 K 2024-11-19T12:17:53,338 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b613eba33c0b469ab4c68e3ac3c237d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732018671394 2024-11-19T12:17:53,338 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f7246f61fee4bb283b30488cca79295, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732018671523 2024-11-19T12:17:53,341 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 873eae9d835b4a80a292a247cfe22c87, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732018671949 2024-11-19T12:17:53,341 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into 4d1e8380e83a44c48ad306a454a3dbe9(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:17:53,341 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:53,342 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=13, startTime=1732018673226; duration=0sec 2024-11-19T12:17:53,342 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:53,342 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A 2024-11-19T12:17:53,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/1dc6e049f5464fcea976198dd8b4895c is 50, key is test_row_0/B:col10/1732018673231/Put/seqid=0 2024-11-19T12:17:53,363 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#34 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:53,364 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/3b784d65742d42c7acac4e72d9a06439 is 50, key is test_row_0/C:col10/1732018671949/Put/seqid=0 2024-11-19T12:17:53,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741872_1048 (size=9757) 2024-11-19T12:17:53,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741873_1049 (size=12493) 2024-11-19T12:17:53,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018733382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018733385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018733386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018733386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018733387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,394 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/3b784d65742d42c7acac4e72d9a06439 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/3b784d65742d42c7acac4e72d9a06439 2024-11-19T12:17:53,405 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into 3b784d65742d42c7acac4e72d9a06439(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:17:53,405 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:53,405 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=13, startTime=1732018673233; duration=0sec 2024-11-19T12:17:53,406 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:53,406 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:17:53,474 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:53,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:53,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:53,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:53,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018733586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018733590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018733592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018733593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018733594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,630 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:53,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:53,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:53,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/1dc6e049f5464fcea976198dd8b4895c 2024-11-19T12:17:53,785 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:53,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:53,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:53,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:53,787 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/27b4fea7debc41559d69293e8b0f0e01 is 50, key is test_row_0/C:col10/1732018673231/Put/seqid=0 2024-11-19T12:17:53,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-19T12:17:53,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741874_1050 (size=9757) 2024-11-19T12:17:53,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018733891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018733895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018733895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018733897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:53,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018733898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:53,942 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:53,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:53,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:53,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:53,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:53,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,097 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:54,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:54,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:54,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/27b4fea7debc41559d69293e8b0f0e01 2024-11-19T12:17:54,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/3cb386632e524b7f90494fa82f354c00 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3cb386632e524b7f90494fa82f354c00 2024-11-19T12:17:54,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3cb386632e524b7f90494fa82f354c00, entries=100, sequenceid=179, filesize=9.5 K 2024-11-19T12:17:54,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/1dc6e049f5464fcea976198dd8b4895c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1dc6e049f5464fcea976198dd8b4895c 2024-11-19T12:17:54,252 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:54,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:54,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:54,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,253 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:54,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:54,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1dc6e049f5464fcea976198dd8b4895c, entries=100, sequenceid=179, filesize=9.5 K 2024-11-19T12:17:54,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/27b4fea7debc41559d69293e8b0f0e01 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/27b4fea7debc41559d69293e8b0f0e01 2024-11-19T12:17:54,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/27b4fea7debc41559d69293e8b0f0e01, entries=100, sequenceid=179, filesize=9.5 K 2024-11-19T12:17:54,283 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 1bd7f6746cebf2fb7e39737ab25d16cc in 1046ms, sequenceid=179, compaction requested=false 2024-11-19T12:17:54,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:54,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:54,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-19T12:17:54,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:54,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:54,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:54,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:54,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:54,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:54,407 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:54,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:54,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:54,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:54,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:54,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,416 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/17bd7e1610b4418d881e75be3aa2430d is 50, key is test_row_0/A:col10/1732018674398/Put/seqid=0 2024-11-19T12:17:54,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018734411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018734416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018734416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018734415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018734425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741875_1051 (size=12151) 2024-11-19T12:17:54,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/17bd7e1610b4418d881e75be3aa2430d 2024-11-19T12:17:54,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/d1c510e2d9d14f308340bb5b6ec83904 is 50, key is test_row_0/B:col10/1732018674398/Put/seqid=0 2024-11-19T12:17:54,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741876_1052 (size=12151) 2024-11-19T12:17:54,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018734527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018734527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018734528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018734532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,565 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:54,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:54,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:54,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:54,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,722 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:54,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:54,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:54,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018734731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018734732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018734734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:54,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018734736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:54,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-19T12:17:54,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/d1c510e2d9d14f308340bb5b6ec83904 2024-11-19T12:17:54,878 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:54,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:54,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:54,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:54,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/c39b3ffe81b946ed935cb47fc164681d is 50, key is test_row_0/C:col10/1732018674398/Put/seqid=0 2024-11-19T12:17:54,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741877_1053 (size=12151) 2024-11-19T12:17:54,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/c39b3ffe81b946ed935cb47fc164681d 2024-11-19T12:17:54,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/17bd7e1610b4418d881e75be3aa2430d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17bd7e1610b4418d881e75be3aa2430d 2024-11-19T12:17:54,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17bd7e1610b4418d881e75be3aa2430d, entries=150, sequenceid=205, filesize=11.9 K 2024-11-19T12:17:54,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/d1c510e2d9d14f308340bb5b6ec83904 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d1c510e2d9d14f308340bb5b6ec83904 2024-11-19T12:17:54,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d1c510e2d9d14f308340bb5b6ec83904, entries=150, sequenceid=205, filesize=11.9 K 2024-11-19T12:17:54,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/c39b3ffe81b946ed935cb47fc164681d as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/c39b3ffe81b946ed935cb47fc164681d 2024-11-19T12:17:54,941 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/c39b3ffe81b946ed935cb47fc164681d, entries=150, sequenceid=205, filesize=11.9 K 2024-11-19T12:17:54,944 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 1bd7f6746cebf2fb7e39737ab25d16cc in 544ms, sequenceid=205, compaction requested=true 2024-11-19T12:17:54,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:54,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:17:54,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:54,944 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:54,944 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:54,945 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:17:54,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:54,946 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:54,947 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:54,947 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files) 2024-11-19T12:17:54,947 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files) 2024-11-19T12:17:54,947 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:54,947 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:54,947 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/ee99d9253b1e45b99000aa8291857f21, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1dc6e049f5464fcea976198dd8b4895c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d1c510e2d9d14f308340bb5b6ec83904] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=33.6 K 2024-11-19T12:17:54,947 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/4d1e8380e83a44c48ad306a454a3dbe9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3cb386632e524b7f90494fa82f354c00, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17bd7e1610b4418d881e75be3aa2430d] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=33.6 K 2024-11-19T12:17:54,948 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting ee99d9253b1e45b99000aa8291857f21, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732018671949 2024-11-19T12:17:54,948 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d1e8380e83a44c48ad306a454a3dbe9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732018671949 2024-11-19T12:17:54,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:17:54,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:54,948 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 1dc6e049f5464fcea976198dd8b4895c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732018673231 2024-11-19T12:17:54,949 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d1c510e2d9d14f308340bb5b6ec83904, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732018673275 2024-11-19T12:17:54,949 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3cb386632e524b7f90494fa82f354c00, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732018673231 2024-11-19T12:17:54,950 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 
{}] compactions.Compactor(224): Compacting 17bd7e1610b4418d881e75be3aa2430d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732018673275 2024-11-19T12:17:54,964 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:54,965 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/a3ed39b837204d82a70505ff283d8db5 is 50, key is test_row_0/A:col10/1732018674398/Put/seqid=0 2024-11-19T12:17:54,967 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:54,970 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/478eaf48189646e3ab1e23344f0d1290 is 50, key is test_row_0/B:col10/1732018674398/Put/seqid=0 2024-11-19T12:17:54,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741878_1054 (size=12595) 2024-11-19T12:17:54,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741879_1055 (size=12595) 2024-11-19T12:17:54,997 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/478eaf48189646e3ab1e23344f0d1290 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/478eaf48189646e3ab1e23344f0d1290 2024-11-19T12:17:55,007 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into 478eaf48189646e3ab1e23344f0d1290(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:17:55,008 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:55,008 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=13, startTime=1732018674944; duration=0sec 2024-11-19T12:17:55,008 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:55,009 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B 2024-11-19T12:17:55,009 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:55,011 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:55,011 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files) 2024-11-19T12:17:55,011 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:55,011 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/3b784d65742d42c7acac4e72d9a06439, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/27b4fea7debc41559d69293e8b0f0e01, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/c39b3ffe81b946ed935cb47fc164681d] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=33.6 K 2024-11-19T12:17:55,012 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b784d65742d42c7acac4e72d9a06439, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732018671949 2024-11-19T12:17:55,013 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 27b4fea7debc41559d69293e8b0f0e01, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732018673231 2024-11-19T12:17:55,016 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c39b3ffe81b946ed935cb47fc164681d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732018673275 2024-11-19T12:17:55,027 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#41 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:55,028 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b9edb644a3e345ceaa96f628a617b4ea is 50, key is test_row_0/C:col10/1732018674398/Put/seqid=0 2024-11-19T12:17:55,031 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:55,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-19T12:17:55,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:55,032 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-19T12:17:55,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:55,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:55,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:55,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:55,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:55,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:55,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:55,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
as already flushing 2024-11-19T12:17:55,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/34a53df7fd594c3b94f3bb70fd330ffb is 50, key is test_row_0/A:col10/1732018674411/Put/seqid=0 2024-11-19T12:17:55,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741880_1056 (size=12595) 2024-11-19T12:17:55,057 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b9edb644a3e345ceaa96f628a617b4ea as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b9edb644a3e345ceaa96f628a617b4ea 2024-11-19T12:17:55,068 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into b9edb644a3e345ceaa96f628a617b4ea(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:17:55,069 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:55,069 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=13, startTime=1732018674946; duration=0sec 2024-11-19T12:17:55,070 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:55,070 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:17:55,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018735066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018735068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018735069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018735071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741881_1057 (size=12151) 2024-11-19T12:17:55,080 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/34a53df7fd594c3b94f3bb70fd330ffb 2024-11-19T12:17:55,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/785249d5693f444fbf0f8387d3fe248e is 50, key is test_row_0/B:col10/1732018674411/Put/seqid=0 2024-11-19T12:17:55,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741882_1058 (size=12151) 2024-11-19T12:17:55,113 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/785249d5693f444fbf0f8387d3fe248e 2024-11-19T12:17:55,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/86f656ea955d43e486a4a39223a945f6 is 50, key is test_row_0/C:col10/1732018674411/Put/seqid=0 2024-11-19T12:17:55,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741883_1059 (size=12151) 2024-11-19T12:17:55,171 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=220 (bloomFilter=true), 
to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/86f656ea955d43e486a4a39223a945f6 2024-11-19T12:17:55,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018735173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018735175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018735175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018735176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/34a53df7fd594c3b94f3bb70fd330ffb as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/34a53df7fd594c3b94f3bb70fd330ffb 2024-11-19T12:17:55,193 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/34a53df7fd594c3b94f3bb70fd330ffb, entries=150, sequenceid=220, filesize=11.9 K 2024-11-19T12:17:55,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/785249d5693f444fbf0f8387d3fe248e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/785249d5693f444fbf0f8387d3fe248e 2024-11-19T12:17:55,205 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/785249d5693f444fbf0f8387d3fe248e, entries=150, sequenceid=220, filesize=11.9 K 2024-11-19T12:17:55,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/86f656ea955d43e486a4a39223a945f6 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/86f656ea955d43e486a4a39223a945f6 2024-11-19T12:17:55,217 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/86f656ea955d43e486a4a39223a945f6, entries=150, sequenceid=220, filesize=11.9 K 2024-11-19T12:17:55,219 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 1bd7f6746cebf2fb7e39737ab25d16cc in 187ms, sequenceid=220, compaction requested=false 2024-11-19T12:17:55,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:55,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:55,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-19T12:17:55,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-19T12:17:55,225 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-19T12:17:55,225 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5250 sec 2024-11-19T12:17:55,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.5340 sec 2024-11-19T12:17:55,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:55,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-19T12:17:55,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:55,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:55,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:55,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:55,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:55,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:55,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/8e3ce5e460224431916b1e5dd041ac9e is 50, key is test_row_0/A:col10/1732018675379/Put/seqid=0 2024-11-19T12:17:55,391 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/a3ed39b837204d82a70505ff283d8db5 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/a3ed39b837204d82a70505ff283d8db5 2024-11-19T12:17:55,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018735390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018735392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018735394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018735392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,401 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into a3ed39b837204d82a70505ff283d8db5(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
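Note on the repeated RegionTooBusyException entries above: they come from HRegion.checkResources rejecting Mutate RPCs while the region's memstore sits above its blocking threshold; in stock HBase that threshold is generally the region memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, which is presumably how the 512.0 K limit reported here arises (the test's actual settings are not visible in this excerpt). Callers are expected to back off and retry while the flushes and compactions logged above drain the memstore. The Java snippet below is a minimal sketch of such a retry loop against the TestAcidGuarantees table and row seen in the log; the connection setup, the hbase.client.retries.number override, and the backoff values are illustrative assumptions, not taken from this test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: lower the client-side retry count so a busy region surfaces quickly
    // to application code instead of being retried for a long time inside the client.
    conf.setInt("hbase.client.retries.number", 1);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L; // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // may be rejected while the memstore is over its blocking limit
          break;          // write accepted
        } catch (IOException busy) {
          // RegionTooBusyException may arrive directly or wrapped, depending on the
          // client retry settings; either way, wait for the memstore to drain and retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}

In the log itself it is the MemStoreFlusher and the FlushRegionProcedure (pid=19) that eventually bring the memstore back under the limit and let the blocked mutations proceed.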
2024-11-19T12:17:55,401 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:55,401 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=13, startTime=1732018674944; duration=0sec 2024-11-19T12:17:55,402 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:55,402 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A 2024-11-19T12:17:55,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741884_1060 (size=14541) 2024-11-19T12:17:55,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/8e3ce5e460224431916b1e5dd041ac9e 2024-11-19T12:17:55,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018735430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6e0374ecb4594a18b4a0d43085d6fe20 is 50, key is test_row_0/B:col10/1732018675379/Put/seqid=0 2024-11-19T12:17:55,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741885_1061 (size=12151) 2024-11-19T12:17:55,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018735497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018735498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018735499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018735499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018735701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018735702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018735703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018735703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:55,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6e0374ecb4594a18b4a0d43085d6fe20 2024-11-19T12:17:55,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/5e6d5a9d39a648e1a8e00eece4b00c78 is 50, key is test_row_0/C:col10/1732018675379/Put/seqid=0 2024-11-19T12:17:55,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741886_1062 (size=12151) 2024-11-19T12:17:56,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018736007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018736007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018736008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018736008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/5e6d5a9d39a648e1a8e00eece4b00c78 2024-11-19T12:17:56,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/8e3ce5e460224431916b1e5dd041ac9e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8e3ce5e460224431916b1e5dd041ac9e 2024-11-19T12:17:56,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8e3ce5e460224431916b1e5dd041ac9e, entries=200, sequenceid=246, filesize=14.2 K 2024-11-19T12:17:56,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6e0374ecb4594a18b4a0d43085d6fe20 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6e0374ecb4594a18b4a0d43085d6fe20 2024-11-19T12:17:56,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6e0374ecb4594a18b4a0d43085d6fe20, entries=150, sequenceid=246, filesize=11.9 K 2024-11-19T12:17:56,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/5e6d5a9d39a648e1a8e00eece4b00c78 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/5e6d5a9d39a648e1a8e00eece4b00c78 2024-11-19T12:17:56,319 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/5e6d5a9d39a648e1a8e00eece4b00c78, entries=150, sequenceid=246, filesize=11.9 K 2024-11-19T12:17:56,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 1bd7f6746cebf2fb7e39737ab25d16cc in 940ms, sequenceid=246, compaction requested=true 2024-11-19T12:17:56,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:56,321 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:56,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:17:56,323 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39287 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:56,323 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files) 2024-11-19T12:17:56,323 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:56,323 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/a3ed39b837204d82a70505ff283d8db5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/34a53df7fd594c3b94f3bb70fd330ffb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8e3ce5e460224431916b1e5dd041ac9e] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=38.4 K 2024-11-19T12:17:56,324 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3ed39b837204d82a70505ff283d8db5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732018673275 2024-11-19T12:17:56,324 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:56,324 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:56,324 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34a53df7fd594c3b94f3bb70fd330ffb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732018674410 
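The RegionTooBusyException entries above are raised by HRegion.checkResources() once the region's memstore passes its blocking limit (reported as 512.0 K here); writes are rejected until the flush shown above drains it. As a rough illustration only, that blocking limit is derived from two standard HBase settings; the sketch below uses assumed values that are not taken from this test's actual configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values only: the blocking limit is the per-region flush size
        // multiplied by the block multiplier. 128 KB * 4 would give the 512 K limit
        // reported in the log, but the test's real settings are not shown here.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking limit (bytes): " + blockingLimit); // 524288 = 512 K
    }
}
```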
2024-11-19T12:17:56,325 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e3ce5e460224431916b1e5dd041ac9e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018675064 2024-11-19T12:17:56,326 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:56,326 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files) 2024-11-19T12:17:56,326 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:56,326 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/478eaf48189646e3ab1e23344f0d1290, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/785249d5693f444fbf0f8387d3fe248e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6e0374ecb4594a18b4a0d43085d6fe20] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=36.0 K 2024-11-19T12:17:56,327 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 478eaf48189646e3ab1e23344f0d1290, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732018673275 2024-11-19T12:17:56,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:17:56,327 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 785249d5693f444fbf0f8387d3fe248e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732018674410 2024-11-19T12:17:56,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:56,328 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e0374ecb4594a18b4a0d43085d6fe20, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018675064 2024-11-19T12:17:56,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:17:56,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:56,337 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:56,338 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/560edcc1c8ab4c9e9982bb58b8d36602 is 50, key is test_row_0/A:col10/1732018675379/Put/seqid=0 2024-11-19T12:17:56,342 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#49 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:56,342 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/4ddea4c545074efa9b27c965948c993f is 50, key is test_row_0/B:col10/1732018675379/Put/seqid=0 2024-11-19T12:17:56,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741888_1064 (size=12697) 2024-11-19T12:17:56,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741887_1063 (size=12697) 2024-11-19T12:17:56,385 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/560edcc1c8ab4c9e9982bb58b8d36602 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/560edcc1c8ab4c9e9982bb58b8d36602 2024-11-19T12:17:56,394 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into 560edcc1c8ab4c9e9982bb58b8d36602(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
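The shortCompactions entries above show ExploringCompactionPolicy selecting all three eligible HFiles of store A for a minor compaction. The sketch below is not HBase's implementation; it is a simplified, assumption-laden illustration of the ratio test such a policy applies, where a candidate file qualifies if it is no larger than the compaction ratio times the combined size of the other selected files.

```java
import java.util.List;

public class RatioSelectionSketch {
    /** Returns true if every file in the candidate set satisfies the ratio test. */
    static boolean satisfiesRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            // A file is acceptable if it is at most ratio * (sum of the other files).
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly matching the selected files in the log (12.3 K, 11.9 K, 14.2 K).
        List<Long> candidate = List.of(12595L, 12186L, 14541L);
        System.out.println(satisfiesRatio(candidate, 1.2)); // a commonly used default ratio
    }
}
```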
2024-11-19T12:17:56,394 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:56,394 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=13, startTime=1732018676321; duration=0sec 2024-11-19T12:17:56,394 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:56,394 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A 2024-11-19T12:17:56,394 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:56,395 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:56,395 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files) 2024-11-19T12:17:56,396 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:56,396 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b9edb644a3e345ceaa96f628a617b4ea, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/86f656ea955d43e486a4a39223a945f6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/5e6d5a9d39a648e1a8e00eece4b00c78] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=36.0 K 2024-11-19T12:17:56,397 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9edb644a3e345ceaa96f628a617b4ea, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732018673275 2024-11-19T12:17:56,397 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86f656ea955d43e486a4a39223a945f6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732018674410 2024-11-19T12:17:56,397 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e6d5a9d39a648e1a8e00eece4b00c78, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018675064 2024-11-19T12:17:56,408 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#50 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:56,409 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/a7673bbd886a4dcfb5332818f70a8c8e is 50, key is test_row_0/C:col10/1732018675379/Put/seqid=0 2024-11-19T12:17:56,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741889_1065 (size=12697) 2024-11-19T12:17:56,427 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/a7673bbd886a4dcfb5332818f70a8c8e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/a7673bbd886a4dcfb5332818f70a8c8e 2024-11-19T12:17:56,437 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into a7673bbd886a4dcfb5332818f70a8c8e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:17:56,437 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:56,437 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=13, startTime=1732018676328; duration=0sec 2024-11-19T12:17:56,438 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:56,438 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:17:56,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:56,514 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-19T12:17:56,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:56,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:56,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:56,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:56,515 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:56,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:56,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/3fab50c1cc1249c7b25b5b16e1a01753 is 50, key is test_row_0/A:col10/1732018675389/Put/seqid=0 2024-11-19T12:17:56,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741890_1066 (size=12201) 2024-11-19T12:17:56,549 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/3fab50c1cc1249c7b25b5b16e1a01753 2024-11-19T12:17:56,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018736553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018736560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/b1d38b7bfccb4d46a5ce20d43840124b is 50, key is test_row_0/B:col10/1732018675389/Put/seqid=0 2024-11-19T12:17:56,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018736562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018736562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741891_1067 (size=12201) 2024-11-19T12:17:56,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/b1d38b7bfccb4d46a5ce20d43840124b 2024-11-19T12:17:56,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/fde937863610463f8180f1938181d1e5 is 50, key is test_row_0/C:col10/1732018675389/Put/seqid=0 2024-11-19T12:17:56,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741892_1068 (size=12201) 2024-11-19T12:17:56,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/fde937863610463f8180f1938181d1e5 2024-11-19T12:17:56,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/3fab50c1cc1249c7b25b5b16e1a01753 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3fab50c1cc1249c7b25b5b16e1a01753 2024-11-19T12:17:56,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3fab50c1cc1249c7b25b5b16e1a01753, entries=150, sequenceid=261, filesize=11.9 K 2024-11-19T12:17:56,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/b1d38b7bfccb4d46a5ce20d43840124b as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/b1d38b7bfccb4d46a5ce20d43840124b 2024-11-19T12:17:56,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/b1d38b7bfccb4d46a5ce20d43840124b, entries=150, sequenceid=261, filesize=11.9 K 2024-11-19T12:17:56,657 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-19T12:17:56,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/fde937863610463f8180f1938181d1e5 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/fde937863610463f8180f1938181d1e5 2024-11-19T12:17:56,666 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/fde937863610463f8180f1938181d1e5, entries=150, sequenceid=261, filesize=11.9 K 2024-11-19T12:17:56,667 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1bd7f6746cebf2fb7e39737ab25d16cc in 153ms, sequenceid=261, compaction requested=false 2024-11-19T12:17:56,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:56,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:56,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-19T12:17:56,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:56,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:56,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:56,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:56,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:56,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:56,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/6ac971f46e084fc096f24a7afa6f299f is 50, key is test_row_0/A:col10/1732018676668/Put/seqid=0 2024-11-19T12:17:56,699 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741893_1069 (size=17181) 2024-11-19T12:17:56,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/6ac971f46e084fc096f24a7afa6f299f 2024-11-19T12:17:56,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/af3a463fc19a44288cf2891c71784fa1 is 50, key is test_row_0/B:col10/1732018676668/Put/seqid=0 2024-11-19T12:17:56,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018736683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741894_1070 (size=12301) 2024-11-19T12:17:56,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018736719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018736720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018736720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,779 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/4ddea4c545074efa9b27c965948c993f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4ddea4c545074efa9b27c965948c993f 2024-11-19T12:17:56,787 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into 4ddea4c545074efa9b27c965948c993f(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
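Each rejected Mutate call above corresponds to a client write bouncing off the blocked region while flushes and compactions catch up. A minimal client-side sketch of absorbing that pressure is below; note the stock HBase client already retries RegionTooBusyException internally, so the explicit loop, table name, column family, and backoff values here are illustrative assumptions rather than what TestAcidGuarantees itself does.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

            long backoffMs = 100; // assumed starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put); // rejected with RegionTooBusyException while the memstore is blocked
                    break;
                } catch (RegionTooBusyException busy) {
                    Thread.sleep(backoffMs); // give the flush time to drain the memstore
                    backoffMs *= 2;
                }
            }
        }
    }
}
```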
2024-11-19T12:17:56,787 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:56,787 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=13, startTime=1732018676324; duration=0sec 2024-11-19T12:17:56,788 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:56,788 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B 2024-11-19T12:17:56,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-19T12:17:56,803 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-19T12:17:56,806 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:17:56,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-19T12:17:56,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-19T12:17:56,809 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:17:56,810 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:17:56,810 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:17:56,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018736825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018736825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018736825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:56,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018736826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:56,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-19T12:17:56,964 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:56,964 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-19T12:17:56,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:56,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:56,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:56,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
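The procedure entries above show the master driving a FlushTableProcedure (pid=20 with a FlushRegionProcedure subprocedure, pid=21) while the region server reports the region is already flushing, so the remote FlushRegionCallable reports failure back to the master. For reference, the kind of client call that initiates such a table flush is sketched below; the connection setup is an assumption, not taken from the test harness.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; in the log this surfaces as
            // an Operation: FLUSH table future plus per-region flush procedures.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```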
2024-11-19T12:17:56,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:56,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018737028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018737029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018737030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018737031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-19T12:17:57,120 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:57,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-19T12:17:57,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:57,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:57,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:57,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/af3a463fc19a44288cf2891c71784fa1 2024-11-19T12:17:57,122 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/3402f6ff70ac4240bec0b7a9827837e5 is 50, key is test_row_0/C:col10/1732018676668/Put/seqid=0 2024-11-19T12:17:57,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741895_1071 (size=12301) 2024-11-19T12:17:57,278 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:57,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-19T12:17:57,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:57,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:57,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:57,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018737333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018737333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018737336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018737336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-19T12:17:57,431 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:57,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-19T12:17:57,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:57,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:57,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:57,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:57,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018737438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,444 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4165 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., hostname=af314c41f984,36047,1732018661455, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:17:57,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/3402f6ff70ac4240bec0b7a9827837e5 2024-11-19T12:17:57,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/6ac971f46e084fc096f24a7afa6f299f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6ac971f46e084fc096f24a7afa6f299f 2024-11-19T12:17:57,585 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:57,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-19T12:17:57,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:57,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:57,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:57,586 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:57,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6ac971f46e084fc096f24a7afa6f299f, entries=250, sequenceid=289, filesize=16.8 K 2024-11-19T12:17:57,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/af3a463fc19a44288cf2891c71784fa1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/af3a463fc19a44288cf2891c71784fa1 2024-11-19T12:17:57,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/af3a463fc19a44288cf2891c71784fa1, entries=150, sequenceid=289, filesize=12.0 K 2024-11-19T12:17:57,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/3402f6ff70ac4240bec0b7a9827837e5 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/3402f6ff70ac4240bec0b7a9827837e5 2024-11-19T12:17:57,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/3402f6ff70ac4240bec0b7a9827837e5, entries=150, sequenceid=289, filesize=12.0 K 2024-11-19T12:17:57,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=46.96 KB/48090 for 1bd7f6746cebf2fb7e39737ab25d16cc in 943ms, sequenceid=289, compaction requested=true 2024-11-19T12:17:57,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:57,615 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:57,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:17:57,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction 
requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:57,616 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:57,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:17:57,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:57,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:17:57,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:57,617 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42079 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:57,617 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files) 2024-11-19T12:17:57,617 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:57,618 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/560edcc1c8ab4c9e9982bb58b8d36602, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3fab50c1cc1249c7b25b5b16e1a01753, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6ac971f46e084fc096f24a7afa6f299f] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=41.1 K 2024-11-19T12:17:57,619 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37199 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:57,619 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files) 2024-11-19T12:17:57,619 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
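The flush completion and the back-to-back "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" entries above reflect the usual store-file thresholds: a minor compaction becomes eligible at three files, and the "16 blocking" figure matches the stock hbase.hstore.blockingStoreFiles default. A minimal sketch of the configuration keys involved, assuming a standard HBase 2.x Configuration; the values shown are illustrative defaults rather than values read from this test run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static Configuration compactionConf() {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible store files before a minor compaction is selected;
            // the entries above queue one as soon as three files are eligible.
            conf.setInt("hbase.hstore.compaction.min", 3);
            // Upper bound on the number of files merged by a single minor compaction.
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Size ratio used when deciding whether a candidate file is "in ratio".
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            // Writes to a store block once this many store files accumulate;
            // the "16 blocking" in the selection lines appears to be this default.
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            return conf;
        }
    }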
2024-11-19T12:17:57,619 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4ddea4c545074efa9b27c965948c993f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/b1d38b7bfccb4d46a5ce20d43840124b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/af3a463fc19a44288cf2891c71784fa1] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=36.3 K 2024-11-19T12:17:57,619 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 560edcc1c8ab4c9e9982bb58b8d36602, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018675064 2024-11-19T12:17:57,620 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ddea4c545074efa9b27c965948c993f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018675064 2024-11-19T12:17:57,620 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fab50c1cc1249c7b25b5b16e1a01753, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732018675389 2024-11-19T12:17:57,621 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b1d38b7bfccb4d46a5ce20d43840124b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732018675389 2024-11-19T12:17:57,621 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ac971f46e084fc096f24a7afa6f299f, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732018676560 2024-11-19T12:17:57,623 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting af3a463fc19a44288cf2891c71784fa1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732018676668 2024-11-19T12:17:57,641 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#57 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:57,642 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/11ec8c4b53af4e4f84c859de910c9fe4 is 50, key is test_row_0/A:col10/1732018676668/Put/seqid=0 2024-11-19T12:17:57,650 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#58 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:57,651 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/115fe210146f4365abe89fd3bac409c7 is 50, key is test_row_0/B:col10/1732018676668/Put/seqid=0 2024-11-19T12:17:57,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741896_1072 (size=12949) 2024-11-19T12:17:57,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741897_1073 (size=12949) 2024-11-19T12:17:57,696 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/115fe210146f4365abe89fd3bac409c7 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/115fe210146f4365abe89fd3bac409c7 2024-11-19T12:17:57,708 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into 115fe210146f4365abe89fd3bac409c7(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:17:57,708 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:57,708 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=13, startTime=1732018677616; duration=0sec 2024-11-19T12:17:57,708 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:57,708 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B 2024-11-19T12:17:57,708 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:57,710 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37199 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:57,710 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files) 2024-11-19T12:17:57,710 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
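The "Exploring compaction algorithm has selected 3 files of size 37199 ... with 1 in ratio" lines record the exploring policy accepting the only candidate permutation because every file is close enough in size to its peers. A simplified, self-contained illustration of that ratio test (a sketch of the general idea, not HBase's actual ExploringCompactionPolicy code; the file sizes below are hypothetical):

    import java.util.List;

    public class RatioCheckSketch {
        // A candidate set is "in ratio" when no file is larger than
        // ratio * (combined size of the other files in the set); oversized
        // outliers are left for a later, larger compaction.
        static boolean inRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Three similarly sized files, comparable to the 12-17 K HFiles in this log.
            System.out.println(inRatio(List.of(12_700L, 12_200L, 17_200L), 1.2)); // true
            // One dominant file fails the test and would be skipped by a minor compaction.
            System.out.println(inRatio(List.of(120_000L, 12_000L, 12_000L), 1.2)); // false
        }
    }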
2024-11-19T12:17:57,710 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/a7673bbd886a4dcfb5332818f70a8c8e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/fde937863610463f8180f1938181d1e5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/3402f6ff70ac4240bec0b7a9827837e5] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=36.3 K 2024-11-19T12:17:57,711 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting a7673bbd886a4dcfb5332818f70a8c8e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018675064 2024-11-19T12:17:57,712 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting fde937863610463f8180f1938181d1e5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732018675389 2024-11-19T12:17:57,712 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 3402f6ff70ac4240bec0b7a9827837e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732018676668 2024-11-19T12:17:57,728 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#59 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:57,728 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/d8f96c7825e64689a1c83b6defe3b8a2 is 50, key is test_row_0/C:col10/1732018676668/Put/seqid=0 2024-11-19T12:17:57,740 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:57,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-19T12:17:57,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
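The "Executing remote procedure class ... FlushRegionCallable, pid=21" entries just below are the region-server side of the master-driven flush (subprocedure pid=21 under FlushTableProcedure pid=20, which completes further down). A minimal sketch of how such a table flush can be requested from a client, assuming a reachable cluster and the standard 2.x Admin API; whether this particular test issues the flush this way is not shown in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Asks the master to flush every region of the table; on the region
                // server this surfaces as the FlushRegionCallable executions logged here.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }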
2024-11-19T12:17:57,741 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-19T12:17:57,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:57,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:57,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:57,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:57,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:57,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:57,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/b851c34ae32d447c95655486a120d842 is 50, key is test_row_0/A:col10/1732018676699/Put/seqid=0 2024-11-19T12:17:57,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741898_1074 (size=12949) 2024-11-19T12:17:57,791 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/d8f96c7825e64689a1c83b6defe3b8a2 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/d8f96c7825e64689a1c83b6defe3b8a2 2024-11-19T12:17:57,801 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into d8f96c7825e64689a1c83b6defe3b8a2(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
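The "Flushing ... 3/3 column families", "FLUSHING TO DISK ... store=A/B/C" and "Swapping pipeline suffix" entries above come from CompactingMemStore, which suggests the families use an in-memory compacting memstore, while the earlier "bloomtype=ROW" file descriptors indicate ROW bloom filters. A minimal sketch of a column-family descriptor carrying those two settings, assuming the standard 2.x descriptor builders; the exact schema used by TestAcidGuarantees may differ:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SchemaSketch {
        public static TableDescriptor acidGuaranteesLikeTable() {
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
            for (String family : new String[] {"A", "B", "C"}) {
                table.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        // Matches the bloomtype=ROW reported for every HFile in this log.
                        .setBloomFilterType(BloomType.ROW)
                        // Enables CompactingMemStore; BASIC is one example policy and may
                        // not be the policy this test actually configures.
                        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                        .build());
            }
            return table.build();
        }
    }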
2024-11-19T12:17:57,801 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:57,801 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=13, startTime=1732018677616; duration=0sec 2024-11-19T12:17:57,802 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:57,802 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:17:57,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741899_1075 (size=9857) 2024-11-19T12:17:57,808 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/b851c34ae32d447c95655486a120d842 2024-11-19T12:17:57,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/c29c4a27514e474fae79deb4d5f671b8 is 50, key is test_row_0/B:col10/1732018676699/Put/seqid=0 2024-11-19T12:17:57,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741900_1076 (size=9857) 2024-11-19T12:17:57,839 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/c29c4a27514e474fae79deb4d5f671b8 2024-11-19T12:17:57,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
as already flushing 2024-11-19T12:17:57,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/30491dae26ce4bceb7b6c36ea614ce13 is 50, key is test_row_0/C:col10/1732018676699/Put/seqid=0 2024-11-19T12:17:57,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741901_1077 (size=9857) 2024-11-19T12:17:57,865 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/30491dae26ce4bceb7b6c36ea614ce13 2024-11-19T12:17:57,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/b851c34ae32d447c95655486a120d842 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/b851c34ae32d447c95655486a120d842 2024-11-19T12:17:57,884 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/b851c34ae32d447c95655486a120d842, entries=100, sequenceid=301, filesize=9.6 K 2024-11-19T12:17:57,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/c29c4a27514e474fae79deb4d5f671b8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/c29c4a27514e474fae79deb4d5f671b8 2024-11-19T12:17:57,894 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/c29c4a27514e474fae79deb4d5f671b8, entries=100, sequenceid=301, filesize=9.6 K 2024-11-19T12:17:57,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/30491dae26ce4bceb7b6c36ea614ce13 as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/30491dae26ce4bceb7b6c36ea614ce13 2024-11-19T12:17:57,906 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/30491dae26ce4bceb7b6c36ea614ce13, entries=100, sequenceid=301, filesize=9.6 K 2024-11-19T12:17:57,908 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=127.47 KB/130530 for 1bd7f6746cebf2fb7e39737ab25d16cc in 167ms, sequenceid=301, compaction requested=false 2024-11-19T12:17:57,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:57,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:57,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-19T12:17:57,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-19T12:17:57,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-19T12:17:57,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:57,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:57,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:57,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:57,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:57,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:57,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:57,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-19T12:17:57,918 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-19T12:17:57,919 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1010 sec 2024-11-19T12:17:57,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): 
Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.1140 sec 2024-11-19T12:17:57,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/36396cea97fa41f2b108f0296cac2a34 is 50, key is test_row_0/A:col10/1732018677906/Put/seqid=0 2024-11-19T12:17:57,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018737934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018737935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018737939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:57,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018737940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:57,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741902_1078 (size=12301) 2024-11-19T12:17:58,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018738042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:58,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:58,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018738043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:58,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:58,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018738046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:58,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:58,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018738052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:58,077 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/11ec8c4b53af4e4f84c859de910c9fe4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/11ec8c4b53af4e4f84c859de910c9fe4 2024-11-19T12:17:58,093 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into 11ec8c4b53af4e4f84c859de910c9fe4(size=12.6 K), total size for store is 22.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:17:58,093 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:58,093 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=13, startTime=1732018677615; duration=0sec 2024-11-19T12:17:58,093 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:58,093 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A 2024-11-19T12:17:58,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:58,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018738248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:58,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:58,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:58,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018738250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:58,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018738250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:58,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:58,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018738257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:58,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/36396cea97fa41f2b108f0296cac2a34 2024-11-19T12:17:58,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/4e5feb2231384545889a166b165547d6 is 50, key is test_row_0/B:col10/1732018677906/Put/seqid=0 2024-11-19T12:17:58,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741903_1079 (size=12301) 2024-11-19T12:17:58,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:58,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018738554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:58,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:58,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:58,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018738554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:58,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018738554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:58,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:58,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018738559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:58,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/4e5feb2231384545889a166b165547d6
2024-11-19T12:17:58,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/84a0883cae4c4648a531c67fc23b55c7 is 50, key is test_row_0/C:col10/1732018677906/Put/seqid=0
2024-11-19T12:17:58,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741904_1080 (size=12301)
2024-11-19T12:17:58,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/84a0883cae4c4648a531c67fc23b55c7
2024-11-19T12:17:58,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/36396cea97fa41f2b108f0296cac2a34 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/36396cea97fa41f2b108f0296cac2a34
2024-11-19T12:17:58,840 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/36396cea97fa41f2b108f0296cac2a34, entries=150, sequenceid=325, filesize=12.0 K
2024-11-19T12:17:58,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/4e5feb2231384545889a166b165547d6 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4e5feb2231384545889a166b165547d6
2024-11-19T12:17:58,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4e5feb2231384545889a166b165547d6, entries=150, sequenceid=325, filesize=12.0 K
2024-11-19T12:17:58,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/84a0883cae4c4648a531c67fc23b55c7 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/84a0883cae4c4648a531c67fc23b55c7
2024-11-19T12:17:58,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/84a0883cae4c4648a531c67fc23b55c7, entries=150, sequenceid=325, filesize=12.0 K
2024-11-19T12:17:58,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 1bd7f6746cebf2fb7e39737ab25d16cc in 952ms, sequenceid=325, compaction requested=true
2024-11-19T12:17:58,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc:
2024-11-19T12:17:58,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1
2024-11-19T12:17:58,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:17:58,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2
2024-11-19T12:17:58,861 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T12:17:58,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:17:58,861 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T12:17:58,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3
2024-11-19T12:17:58,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-19T12:17:58,863 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T12:17:58,863 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files)
2024-11-19T12:17:58,863 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:58,863 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/11ec8c4b53af4e4f84c859de910c9fe4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/b851c34ae32d447c95655486a120d842, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/36396cea97fa41f2b108f0296cac2a34] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=34.3 K
2024-11-19T12:17:58,864 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11ec8c4b53af4e4f84c859de910c9fe4, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732018676668
2024-11-19T12:17:58,864 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T12:17:58,864 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files)
2024-11-19T12:17:58,864 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:58,865 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting b851c34ae32d447c95655486a120d842, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732018676681
2024-11-19T12:17:58,865 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/115fe210146f4365abe89fd3bac409c7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/c29c4a27514e474fae79deb4d5f671b8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4e5feb2231384545889a166b165547d6] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=34.3 K
2024-11-19T12:17:58,865 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36396cea97fa41f2b108f0296cac2a34, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732018677896
2024-11-19T12:17:58,865 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 115fe210146f4365abe89fd3bac409c7, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732018676668
2024-11-19T12:17:58,866 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c29c4a27514e474fae79deb4d5f671b8, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732018676681
2024-11-19T12:17:58,867 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e5feb2231384545889a166b165547d6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732018677896
2024-11-19T12:17:58,881 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#66 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:17:58,882 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/32c8280e56504c8d97cae1793a6b5d2e is 50, key is test_row_0/A:col10/1732018677906/Put/seqid=0
2024-11-19T12:17:58,885 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#67 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:17:58,886 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/55fb5358a12443cab338ebac05033ad1 is 50, key is test_row_0/B:col10/1732018677906/Put/seqid=0
2024-11-19T12:17:58,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741905_1081 (size=13051)
2024-11-19T12:17:58,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741906_1082 (size=13051)
2024-11-19T12:17:58,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-11-19T12:17:58,914 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed
2024-11-19T12:17:58,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-19T12:17:58,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees
2024-11-19T12:17:58,920 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-19T12:17:58,921 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T12:17:58,921 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T12:17:58,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22
2024-11-19T12:17:58,932 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/55fb5358a12443cab338ebac05033ad1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/55fb5358a12443cab338ebac05033ad1
2024-11-19T12:17:58,932 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/32c8280e56504c8d97cae1793a6b5d2e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/32c8280e56504c8d97cae1793a6b5d2e
2024-11-19T12:17:58,943 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into 55fb5358a12443cab338ebac05033ad1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T12:17:58,943 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc:
2024-11-19T12:17:58,943 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=13, startTime=1732018678861; duration=0sec
2024-11-19T12:17:58,943 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-19T12:17:58,943 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B
2024-11-19T12:17:58,943 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T12:17:58,945 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into 32c8280e56504c8d97cae1793a6b5d2e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T12:17:58,945 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc:
2024-11-19T12:17:58,945 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=13, startTime=1732018678861; duration=0sec
2024-11-19T12:17:58,945 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:17:58,945 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A
2024-11-19T12:17:58,946 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T12:17:58,946 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files)
2024-11-19T12:17:58,946 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:58,946 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/d8f96c7825e64689a1c83b6defe3b8a2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/30491dae26ce4bceb7b6c36ea614ce13, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/84a0883cae4c4648a531c67fc23b55c7] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=34.3 K
2024-11-19T12:17:58,947 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d8f96c7825e64689a1c83b6defe3b8a2, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732018676668
2024-11-19T12:17:58,948 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 30491dae26ce4bceb7b6c36ea614ce13, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732018676681
2024-11-19T12:17:58,948 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 84a0883cae4c4648a531c67fc23b55c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732018677896
2024-11-19T12:17:58,961 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#68 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:17:58,962 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/a532dc47a48e4995a6e22d7f5057485d is 50, key is test_row_0/C:col10/1732018677906/Put/seqid=0
2024-11-19T12:17:58,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741907_1083 (size=13051)
2024-11-19T12:17:59,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22
2024-11-19T12:17:59,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc
2024-11-19T12:17:59,065 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB
2024-11-19T12:17:59,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A
2024-11-19T12:17:59,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:59,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B
2024-11-19T12:17:59,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:59,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C
2024-11-19T12:17:59,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:59,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/616ad125d5a947b68e9406ae0aa11c2d is 50, key is test_row_0/A:col10/1732018679061/Put/seqid=0
2024-11-19T12:17:59,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741908_1084 (size=12301)
2024-11-19T12:17:59,083 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455
2024-11-19T12:17:59,084 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23
2024-11-19T12:17:59,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:59,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/616ad125d5a947b68e9406ae0aa11c2d
2024-11-19T12:17:59,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing
2024-11-19T12:17:59,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:59,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23
java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:59,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23
java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:59,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=23
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:59,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/de46c0f65a554be6b7ab3890c9f99825 is 50, key is test_row_0/B:col10/1732018679061/Put/seqid=0
2024-11-19T12:17:59,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:59,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018739097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:59,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:59,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018739099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:59,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:59,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018739100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:59,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:59,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018739101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:59,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741909_1085 (size=12301)
2024-11-19T12:17:59,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/de46c0f65a554be6b7ab3890c9f99825
2024-11-19T12:17:59,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/6e8dd928f2b8486cba39eda5710763f8 is 50, key is test_row_0/C:col10/1732018679061/Put/seqid=0
2024-11-19T12:17:59,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741910_1086 (size=12301)
2024-11-19T12:17:59,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/6e8dd928f2b8486cba39eda5710763f8
2024-11-19T12:17:59,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/616ad125d5a947b68e9406ae0aa11c2d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/616ad125d5a947b68e9406ae0aa11c2d
2024-11-19T12:17:59,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/616ad125d5a947b68e9406ae0aa11c2d, entries=150, sequenceid=344, filesize=12.0 K
2024-11-19T12:17:59,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/de46c0f65a554be6b7ab3890c9f99825 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/de46c0f65a554be6b7ab3890c9f99825
2024-11-19T12:17:59,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/de46c0f65a554be6b7ab3890c9f99825, entries=150, sequenceid=344, filesize=12.0 K
2024-11-19T12:17:59,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/6e8dd928f2b8486cba39eda5710763f8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6e8dd928f2b8486cba39eda5710763f8
2024-11-19T12:17:59,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6e8dd928f2b8486cba39eda5710763f8, entries=150, sequenceid=344, filesize=12.0 K
2024-11-19T12:17:59,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 1bd7f6746cebf2fb7e39737ab25d16cc in 127ms, sequenceid=344, compaction requested=false
2024-11-19T12:17:59,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc:
2024-11-19T12:17:59,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc
2024-11-19T12:17:59,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB
2024-11-19T12:17:59,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A
2024-11-19T12:17:59,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:59,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B
2024-11-19T12:17:59,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:59,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C
2024-11-19T12:17:59,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:17:59,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/dd433a8092c34b89a5e0cbbbba3d4e56 is 50, key is test_row_0/A:col10/1732018679100/Put/seqid=0
2024-11-19T12:17:59,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:59,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018739215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:59,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018739217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:59,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018739217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22
2024-11-19T12:17:59,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:17:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018739223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
2024-11-19T12:17:59,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741911_1087 (size=12301)
2024-11-19T12:17:59,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/dd433a8092c34b89a5e0cbbbba3d4e56
2024-11-19T12:17:59,238 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455
2024-11-19T12:17:59,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23
2024-11-19T12:17:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing
2024-11-19T12:17:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:17:59,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23
java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:59,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23
java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:59,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=23
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:17:59,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/a6b069a5389f4331831115322b9018cd is 50, key is test_row_0/B:col10/1732018679100/Put/seqid=0
2024-11-19T12:17:59,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741912_1088 (size=12301)
2024-11-19T12:17:59,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018739321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018739327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018739327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018739330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,380 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/a532dc47a48e4995a6e22d7f5057485d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/a532dc47a48e4995a6e22d7f5057485d 2024-11-19T12:17:59,388 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into a532dc47a48e4995a6e22d7f5057485d(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:17:59,388 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:59,388 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=13, startTime=1732018678861; duration=0sec 2024-11-19T12:17:59,388 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:59,388 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:17:59,393 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:59,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-19T12:17:59,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:59,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:59,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:59,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:59,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:59,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:59,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-19T12:17:59,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018739526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018739531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018739531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018739533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,548 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:59,548 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-19T12:17:59,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:59,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:59,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:59,549 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:17:59,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:59,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:59,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/a6b069a5389f4331831115322b9018cd 2024-11-19T12:17:59,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/93783d132885471480c890b6f0bb0ff1 is 50, key is test_row_0/C:col10/1732018679100/Put/seqid=0 2024-11-19T12:17:59,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741913_1089 (size=12301) 2024-11-19T12:17:59,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/93783d132885471480c890b6f0bb0ff1 2024-11-19T12:17:59,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/dd433a8092c34b89a5e0cbbbba3d4e56 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/dd433a8092c34b89a5e0cbbbba3d4e56 2024-11-19T12:17:59,701 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:59,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-19T12:17:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:59,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:59,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:59,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/dd433a8092c34b89a5e0cbbbba3d4e56, entries=150, sequenceid=367, filesize=12.0 K 2024-11-19T12:17:59,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/a6b069a5389f4331831115322b9018cd as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/a6b069a5389f4331831115322b9018cd 2024-11-19T12:17:59,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/a6b069a5389f4331831115322b9018cd, entries=150, sequenceid=367, filesize=12.0 K 2024-11-19T12:17:59,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/93783d132885471480c890b6f0bb0ff1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/93783d132885471480c890b6f0bb0ff1 2024-11-19T12:17:59,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/93783d132885471480c890b6f0bb0ff1, entries=150, sequenceid=367, filesize=12.0 K 
2024-11-19T12:17:59,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 1bd7f6746cebf2fb7e39737ab25d16cc in 513ms, sequenceid=367, compaction requested=true 2024-11-19T12:17:59,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:17:59,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:17:59,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:59,719 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:59,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:17:59,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:17:59,720 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:17:59,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:17:59,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:17:59,721 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:59,721 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files) 2024-11-19T12:17:59,721 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:17:59,722 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/32c8280e56504c8d97cae1793a6b5d2e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/616ad125d5a947b68e9406ae0aa11c2d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/dd433a8092c34b89a5e0cbbbba3d4e56] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=36.8 K 2024-11-19T12:17:59,722 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:17:59,722 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files) 2024-11-19T12:17:59,722 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:59,722 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/55fb5358a12443cab338ebac05033ad1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/de46c0f65a554be6b7ab3890c9f99825, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/a6b069a5389f4331831115322b9018cd] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=36.8 K 2024-11-19T12:17:59,723 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32c8280e56504c8d97cae1793a6b5d2e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732018677896 2024-11-19T12:17:59,723 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 616ad125d5a947b68e9406ae0aa11c2d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1732018677933 2024-11-19T12:17:59,723 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 55fb5358a12443cab338ebac05033ad1, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732018677896 2024-11-19T12:17:59,724 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd433a8092c34b89a5e0cbbbba3d4e56, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732018679100 2024-11-19T12:17:59,724 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] 
compactions.Compactor(224): Compacting de46c0f65a554be6b7ab3890c9f99825, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1732018677933 2024-11-19T12:17:59,724 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting a6b069a5389f4331831115322b9018cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732018679100 2024-11-19T12:17:59,736 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#76 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:59,736 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#75 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:17:59,737 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/e94b6f4ab75e47688f13adf66ca6718c is 50, key is test_row_0/B:col10/1732018679100/Put/seqid=0 2024-11-19T12:17:59,737 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/d95285061bcc4b34b50c1584fa32e5f8 is 50, key is test_row_0/A:col10/1732018679100/Put/seqid=0 2024-11-19T12:17:59,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741914_1090 (size=13153) 2024-11-19T12:17:59,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741915_1091 (size=13153) 2024-11-19T12:17:59,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:17:59,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-19T12:17:59,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:17:59,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:59,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:17:59,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:59,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:17:59,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:17:59,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/acba55aede0b43bbb0c3aabbbce90c26 is 50, key is test_row_0/A:col10/1732018679215/Put/seqid=0 2024-11-19T12:17:59,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741916_1092 (size=12301) 2024-11-19T12:17:59,855 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:17:59,855 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-19T12:17:59,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:59,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:17:59,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:17:59,856 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:59,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:17:59,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018739864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018739865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018739866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018739868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018739970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018739974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018739974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:17:59,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:17:59,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018739975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:00,008 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:00,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-19T12:18:00,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:00,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:00,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:00,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
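The record that just ended is another failure of the remote flush procedure (pid=23): the region server declines with "NOT flushing ... as already flushing", wraps that in an IOException, and the master re-dispatches the procedure. A minimal client-side sketch, assuming only an Admin handle on the same cluster and the table name from this log, of requesting the same flush explicitly; whether it runs or is skipped is decided on the region server exactly as in the records above:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask for a flush of all memstores of the table. If a flush is already in progress,
      // the region server skips it, which is what the "NOT flushing ... as already flushing"
      // DEBUG records in this log show for pid=23.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```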
2024-11-19T12:18:00,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-19T12:18:00,156 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/d95285061bcc4b34b50c1584fa32e5f8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/d95285061bcc4b34b50c1584fa32e5f8 2024-11-19T12:18:00,156 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/e94b6f4ab75e47688f13adf66ca6718c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/e94b6f4ab75e47688f13adf66ca6718c 2024-11-19T12:18:00,164 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:00,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-19T12:18:00,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:00,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:00,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:00,165 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,166 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into d95285061bcc4b34b50c1584fa32e5f8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:00,166 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,166 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=13, startTime=1732018679719; duration=0sec 2024-11-19T12:18:00,167 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into e94b6f4ab75e47688f13adf66ca6718c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
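The two INFO records above close out the minor compactions of stores A and B, each rewriting 3 store files into a single ~12.8 K file, and store C is selected next by the same policy ("3 eligible, 16 blocking"). A small sketch, assuming an Admin handle on the same cluster, of requesting compactions explicitly; the keys named in the comments are the standard server-side settings behind that selection, not values taken from this test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionRequestSketch {
  public static void main(String[] args) throws Exception {
    // Server-side knobs that shape the selection seen in this log (named for reference only):
    //   hbase.hstore.compaction.min      - files needed before a store is eligible to compact
    //   hbase.hstore.blockingStoreFiles  - store-file count at which writes block (the "16 blocking")
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table);       // queue a minor compaction for every store of the table
      admin.majorCompact(table);  // or rewrite all store files of each store in one pass
    }
  }
}
```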
2024-11-19T12:18:00,167 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:00,167 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=13, startTime=1732018679719; duration=0sec 2024-11-19T12:18:00,167 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:00,167 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:00,167 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A 2024-11-19T12:18:00,167 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B 2024-11-19T12:18:00,167 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:00,169 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:00,170 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files) 2024-11-19T12:18:00,170 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
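While the stores are compacted and the flush stays blocked, the Mutate handlers before and after this point keep rejecting writes with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit (in a stock setup that limit is roughly hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; the small value here comes from the test configuration). A client-side sketch of writing through such a busy window, assuming the table, row, and column names seen in this log and a hand-rolled retry loop; the regular client already retries internally, so this is illustrative only:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family "A", qualifier "col10" as in the flushed cells of this log; the value is made up.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                 // the server rejects this while the memstore is over its limit
          break;
        } catch (RegionTooBusyException busy) {
          if (attempt >= 5) throw busy;   // give up after a few attempts
          Thread.sleep(100L * attempt);   // crude linear backoff while the flush catches up
        }
      }
    }
  }
}
```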
2024-11-19T12:18:00,170 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/a532dc47a48e4995a6e22d7f5057485d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6e8dd928f2b8486cba39eda5710763f8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/93783d132885471480c890b6f0bb0ff1] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=36.8 K 2024-11-19T12:18:00,170 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting a532dc47a48e4995a6e22d7f5057485d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732018677896 2024-11-19T12:18:00,171 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e8dd928f2b8486cba39eda5710763f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1732018677933 2024-11-19T12:18:00,172 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93783d132885471480c890b6f0bb0ff1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732018679100 2024-11-19T12:18:00,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018740176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:00,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018740177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:00,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018740177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:00,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018740179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:00,186 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#78 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:00,186 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/f55055bfa54a41068da6c794301c2ce2 is 50, key is test_row_0/C:col10/1732018679100/Put/seqid=0 2024-11-19T12:18:00,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741917_1093 (size=13153) 2024-11-19T12:18:00,220 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/f55055bfa54a41068da6c794301c2ce2 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f55055bfa54a41068da6c794301c2ce2 2024-11-19T12:18:00,231 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into f55055bfa54a41068da6c794301c2ce2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:00,233 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:00,234 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=13, startTime=1732018679720; duration=0sec 2024-11-19T12:18:00,234 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:00,234 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:18:00,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/acba55aede0b43bbb0c3aabbbce90c26 2024-11-19T12:18:00,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/d784fd79aa4340edb7dc8386fbb2e633 is 50, key is test_row_0/B:col10/1732018679215/Put/seqid=0 2024-11-19T12:18:00,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741918_1094 (size=12301) 2024-11-19T12:18:00,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=382 (bloomFilter=true), 
to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/d784fd79aa4340edb7dc8386fbb2e633 2024-11-19T12:18:00,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/6ed2fc24565c431188653f21e59766af is 50, key is test_row_0/C:col10/1732018679215/Put/seqid=0 2024-11-19T12:18:00,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741919_1095 (size=12301) 2024-11-19T12:18:00,318 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:00,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-19T12:18:00,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:00,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:00,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:00,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018740480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:00,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018740482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:00,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018740483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:00,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018740484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:00,494 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:00,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-19T12:18:00,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:00,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:00,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:00,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,648 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:00,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-19T12:18:00,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:00,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:00,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:00,649 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:00,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:00,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/6ed2fc24565c431188653f21e59766af 2024-11-19T12:18:00,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/acba55aede0b43bbb0c3aabbbce90c26 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/acba55aede0b43bbb0c3aabbbce90c26 2024-11-19T12:18:00,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/acba55aede0b43bbb0c3aabbbce90c26, entries=150, sequenceid=382, filesize=12.0 K 2024-11-19T12:18:00,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/d784fd79aa4340edb7dc8386fbb2e633 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d784fd79aa4340edb7dc8386fbb2e633 2024-11-19T12:18:00,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d784fd79aa4340edb7dc8386fbb2e633, entries=150, sequenceid=382, filesize=12.0 K 2024-11-19T12:18:00,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/6ed2fc24565c431188653f21e59766af as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6ed2fc24565c431188653f21e59766af 2024-11-19T12:18:00,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6ed2fc24565c431188653f21e59766af, entries=150, sequenceid=382, filesize=12.0 K 2024-11-19T12:18:00,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 1bd7f6746cebf2fb7e39737ab25d16cc in 913ms, sequenceid=382, compaction requested=false 2024-11-19T12:18:00,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:00,802 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:00,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=23 2024-11-19T12:18:00,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:00,803 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-19T12:18:00,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:00,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:00,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:00,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:00,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:00,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:00,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/5c19c5ca8d774109b28365bf4174d2fc is 50, key is test_row_0/A:col10/1732018679862/Put/seqid=0 2024-11-19T12:18:00,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741920_1096 (size=12301) 2024-11-19T12:18:00,836 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/5c19c5ca8d774109b28365bf4174d2fc 2024-11-19T12:18:00,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/5441207f55984f35abffa611dcb23310 is 50, key is test_row_0/B:col10/1732018679862/Put/seqid=0 2024-11-19T12:18:00,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741921_1097 (size=12301) 2024-11-19T12:18:00,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:00,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:01,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018740999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018741000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018741001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018741001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-19T12:18:01,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018741105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018741105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018741105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018741106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,259 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/5441207f55984f35abffa611dcb23310 2024-11-19T12:18:01,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/38f2b63b3e2f4b60a65d26a8efa30c74 is 50, key is test_row_0/C:col10/1732018679862/Put/seqid=0 2024-11-19T12:18:01,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741922_1098 (size=12301) 2024-11-19T12:18:01,294 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/38f2b63b3e2f4b60a65d26a8efa30c74 2024-11-19T12:18:01,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/5c19c5ca8d774109b28365bf4174d2fc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5c19c5ca8d774109b28365bf4174d2fc 2024-11-19T12:18:01,307 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5c19c5ca8d774109b28365bf4174d2fc, entries=150, sequenceid=407, filesize=12.0 K 2024-11-19T12:18:01,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/5441207f55984f35abffa611dcb23310 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5441207f55984f35abffa611dcb23310 2024-11-19T12:18:01,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018741309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018741309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018741309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018741309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,314 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5441207f55984f35abffa611dcb23310, entries=150, sequenceid=407, filesize=12.0 K 2024-11-19T12:18:01,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/38f2b63b3e2f4b60a65d26a8efa30c74 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/38f2b63b3e2f4b60a65d26a8efa30c74 2024-11-19T12:18:01,322 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/38f2b63b3e2f4b60a65d26a8efa30c74, entries=150, sequenceid=407, filesize=12.0 K 2024-11-19T12:18:01,323 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 1bd7f6746cebf2fb7e39737ab25d16cc in 520ms, sequenceid=407, compaction requested=true 2024-11-19T12:18:01,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:01,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:18:01,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-19T12:18:01,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-19T12:18:01,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-19T12:18:01,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4030 sec 2024-11-19T12:18:01,329 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 2.4090 sec 2024-11-19T12:18:01,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:01,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-19T12:18:01,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:01,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:01,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:01,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:01,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:01,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:01,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/ad57c15442a64e32a0ebab963f0bdf0d is 50, key is test_row_0/A:col10/1732018681451/Put/seqid=0 2024-11-19T12:18:01,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741923_1099 (size=12301) 2024-11-19T12:18:01,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018741505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018741607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018741613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018741614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018741614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018741615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:01,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018741809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:01,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/ad57c15442a64e32a0ebab963f0bdf0d 2024-11-19T12:18:01,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6a782fcb3f2c4159af1723ffd2742b0c is 50, key is test_row_0/B:col10/1732018681451/Put/seqid=0 2024-11-19T12:18:01,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741924_1100 (size=12301) 2024-11-19T12:18:02,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:02,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018742111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:02,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:02,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018742116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:02,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:02,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018742118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:02,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:02,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018742119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:02,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:02,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018742120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:02,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6a782fcb3f2c4159af1723ffd2742b0c 2024-11-19T12:18:02,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/cc4994b5b9f34e9e8843455fb67762d9 is 50, key is test_row_0/C:col10/1732018681451/Put/seqid=0 2024-11-19T12:18:02,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741925_1101 (size=12301) 2024-11-19T12:18:02,321 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/cc4994b5b9f34e9e8843455fb67762d9 2024-11-19T12:18:02,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/ad57c15442a64e32a0ebab963f0bdf0d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/ad57c15442a64e32a0ebab963f0bdf0d 2024-11-19T12:18:02,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/ad57c15442a64e32a0ebab963f0bdf0d, entries=150, sequenceid=422, filesize=12.0 K 2024-11-19T12:18:02,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6a782fcb3f2c4159af1723ffd2742b0c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6a782fcb3f2c4159af1723ffd2742b0c 2024-11-19T12:18:02,342 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6a782fcb3f2c4159af1723ffd2742b0c, entries=150, sequenceid=422, filesize=12.0 K 2024-11-19T12:18:02,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/cc4994b5b9f34e9e8843455fb67762d9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/cc4994b5b9f34e9e8843455fb67762d9 2024-11-19T12:18:02,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/cc4994b5b9f34e9e8843455fb67762d9, entries=150, sequenceid=422, filesize=12.0 K 2024-11-19T12:18:02,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 1bd7f6746cebf2fb7e39737ab25d16cc in 897ms, sequenceid=422, compaction requested=true 2024-11-19T12:18:02,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:02,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:02,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:02,351 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:02,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:02,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:02,351 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:02,351 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:02,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:02,353 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:02,353 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:02,353 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files) 2024-11-19T12:18:02,353 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files) 2024-11-19T12:18:02,353 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:02,353 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:02,353 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/d95285061bcc4b34b50c1584fa32e5f8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/acba55aede0b43bbb0c3aabbbce90c26, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5c19c5ca8d774109b28365bf4174d2fc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/ad57c15442a64e32a0ebab963f0bdf0d] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=48.9 K 2024-11-19T12:18:02,353 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/e94b6f4ab75e47688f13adf66ca6718c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d784fd79aa4340edb7dc8386fbb2e633, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5441207f55984f35abffa611dcb23310, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6a782fcb3f2c4159af1723ffd2742b0c] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=48.9 K 2024-11-19T12:18:02,354 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting e94b6f4ab75e47688f13adf66ca6718c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732018679100 2024-11-19T12:18:02,354 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d95285061bcc4b34b50c1584fa32e5f8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732018679100 2024-11-19T12:18:02,354 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d784fd79aa4340edb7dc8386fbb2e633, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732018679215 2024-11-19T12:18:02,354 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting acba55aede0b43bbb0c3aabbbce90c26, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732018679215 2024-11-19T12:18:02,355 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 5441207f55984f35abffa611dcb23310, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732018679853 2024-11-19T12:18:02,355 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c19c5ca8d774109b28365bf4174d2fc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732018679853 2024-11-19T12:18:02,355 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a782fcb3f2c4159af1723ffd2742b0c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1732018680999 2024-11-19T12:18:02,355 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad57c15442a64e32a0ebab963f0bdf0d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1732018680999 2024-11-19T12:18:02,376 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:02,378 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/6b75f1e4527943cab26d9daed87e1bd8 is 50, key is test_row_0/A:col10/1732018681451/Put/seqid=0 2024-11-19T12:18:02,378 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#88 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:02,379 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/78f0db79d0db4d4a98bb90d00a1f7cbb is 50, key is test_row_0/B:col10/1732018681451/Put/seqid=0 2024-11-19T12:18:02,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741927_1103 (size=13289) 2024-11-19T12:18:02,407 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/78f0db79d0db4d4a98bb90d00a1f7cbb as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/78f0db79d0db4d4a98bb90d00a1f7cbb 2024-11-19T12:18:02,415 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into 78f0db79d0db4d4a98bb90d00a1f7cbb(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:02,415 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:02,415 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=12, startTime=1732018682351; duration=0sec 2024-11-19T12:18:02,415 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:02,415 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B 2024-11-19T12:18:02,415 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:02,417 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:02,418 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files) 2024-11-19T12:18:02,418 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:18:02,418 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f55055bfa54a41068da6c794301c2ce2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6ed2fc24565c431188653f21e59766af, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/38f2b63b3e2f4b60a65d26a8efa30c74, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/cc4994b5b9f34e9e8843455fb67762d9] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=48.9 K 2024-11-19T12:18:02,420 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting f55055bfa54a41068da6c794301c2ce2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732018679100 2024-11-19T12:18:02,420 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ed2fc24565c431188653f21e59766af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732018679215 2024-11-19T12:18:02,421 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 38f2b63b3e2f4b60a65d26a8efa30c74, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732018679853 2024-11-19T12:18:02,421 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting cc4994b5b9f34e9e8843455fb67762d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1732018680999 2024-11-19T12:18:02,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741926_1102 (size=13289) 2024-11-19T12:18:02,433 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/6b75f1e4527943cab26d9daed87e1bd8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6b75f1e4527943cab26d9daed87e1bd8 2024-11-19T12:18:02,441 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into 6b75f1e4527943cab26d9daed87e1bd8(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:02,441 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:02,441 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=12, startTime=1732018682351; duration=0sec 2024-11-19T12:18:02,441 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:02,441 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A 2024-11-19T12:18:02,446 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#89 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:02,447 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/f05c7e8b68b744eeba259e81c202fbd0 is 50, key is test_row_0/C:col10/1732018681451/Put/seqid=0 2024-11-19T12:18:02,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741928_1104 (size=13289) 2024-11-19T12:18:02,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:02,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-19T12:18:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:02,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/c3e424d276ef4d50b74de13bd51380d9 is 50, key is test_row_0/A:col10/1732018682617/Put/seqid=0 2024-11-19T12:18:02,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741929_1105 (size=14741) 
2024-11-19T12:18:02,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/c3e424d276ef4d50b74de13bd51380d9 2024-11-19T12:18:02,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:02,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018742651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:02,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/f72b1bbd16c44b6b84ea68e7a34d83fa is 50, key is test_row_0/B:col10/1732018682617/Put/seqid=0 2024-11-19T12:18:02,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741930_1106 (size=12301) 2024-11-19T12:18:02,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:02,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018742754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:02,862 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/f05c7e8b68b744eeba259e81c202fbd0 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f05c7e8b68b744eeba259e81c202fbd0 2024-11-19T12:18:02,869 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into f05c7e8b68b744eeba259e81c202fbd0(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:02,869 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:02,869 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=12, startTime=1732018682351; duration=0sec 2024-11-19T12:18:02,869 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:02,869 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:18:02,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:02,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018742957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:03,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-19T12:18:03,028 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-19T12:18:03,030 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:03,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-19T12:18:03,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-19T12:18:03,032 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:03,033 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:03,033 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:03,095 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/f72b1bbd16c44b6b84ea68e7a34d83fa 2024-11-19T12:18:03,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/2a83737ca89047278d480f20591ee6b1 is 50, key is test_row_0/C:col10/1732018682617/Put/seqid=0 2024-11-19T12:18:03,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741931_1107 (size=12301) 2024-11-19T12:18:03,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:03,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018743122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:03,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:03,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018743126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:03,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:03,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018743129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:03,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-19T12:18:03,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:03,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018743133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:03,185 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:03,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-19T12:18:03,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:03,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:03,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:03,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:03,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:03,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:03,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:03,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018743261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:03,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-19T12:18:03,338 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:03,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-19T12:18:03,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
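A minimal sketch of where the "Over memstore limit=512.0 K" figure in the surrounding RegionTooBusyException entries likely comes from: HRegion.checkResources rejects writes once a region's memstore exceeds the configured flush size times hbase.hregion.memstore.block.multiplier. The concrete numbers below (a 128 KB flush size with the default multiplier of 4, which works out to the 512 K limit seen here) are assumptions for illustration only; the test's actual configuration is not shown in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-style values: 128 KB flush size * default multiplier 4 = 512 KB,
    // matching the "Over memstore limit=512.0 K" reported in the log above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Writes to a region are rejected with RegionTooBusyException once its memstore
    // exceeds flushSize * multiplier.
    System.out.println("Blocking memstore size per region: " + (flushSize * multiplier) + " bytes");
  }
}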
2024-11-19T12:18:03,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:03,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:03,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:03,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:03,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:03,492 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:03,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-19T12:18:03,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:03,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
as already flushing 2024-11-19T12:18:03,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:03,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:03,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:03,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:03,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/2a83737ca89047278d480f20591ee6b1 2024-11-19T12:18:03,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/c3e424d276ef4d50b74de13bd51380d9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/c3e424d276ef4d50b74de13bd51380d9 2024-11-19T12:18:03,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/c3e424d276ef4d50b74de13bd51380d9, entries=200, sequenceid=446, filesize=14.4 K 2024-11-19T12:18:03,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/f72b1bbd16c44b6b84ea68e7a34d83fa as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f72b1bbd16c44b6b84ea68e7a34d83fa 2024-11-19T12:18:03,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f72b1bbd16c44b6b84ea68e7a34d83fa, entries=150, sequenceid=446, filesize=12.0 K 2024-11-19T12:18:03,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/2a83737ca89047278d480f20591ee6b1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/2a83737ca89047278d480f20591ee6b1 2024-11-19T12:18:03,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/2a83737ca89047278d480f20591ee6b1, entries=150, sequenceid=446, filesize=12.0 K 2024-11-19T12:18:03,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 1bd7f6746cebf2fb7e39737ab25d16cc in 917ms, sequenceid=446, compaction requested=false 2024-11-19T12:18:03,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:03,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-19T12:18:03,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:03,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-19T12:18:03,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
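The pid=24/pid=25 entries around this point trace an admin-requested flush of TestAcidGuarantees: the master's FlushTableProcedure dispatches a FlushRegionProcedure, the region server's FlushRegionCallable keeps failing with "already flushing" while MemStoreFlusher.0 is still writing, and the master re-dispatches it until it succeeds. A minimal client-side sketch of issuing such a flush, assuming a reachable cluster and using only the standard Admin API; everything apart from the table name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Blocks until the master reports the flush procedure complete, which corresponds
      // to the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24
      // completed" line further down in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}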
2024-11-19T12:18:03,646 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-19T12:18:03,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:03,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:03,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:03,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:03,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:03,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:03,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/8265ac4cfb79481587be7c429894cb14 is 50, key is test_row_0/A:col10/1732018682648/Put/seqid=0 2024-11-19T12:18:03,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741932_1108 (size=12301) 2024-11-19T12:18:03,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:03,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:03,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:03,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018743819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:03,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:03,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018743922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:04,068 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/8265ac4cfb79481587be7c429894cb14 2024-11-19T12:18:04,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/bbed593ecce34e299ee64de9a1577630 is 50, key is test_row_0/B:col10/1732018682648/Put/seqid=0 2024-11-19T12:18:04,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741933_1109 (size=12301) 2024-11-19T12:18:04,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:04,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018744125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:04,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-19T12:18:04,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:04,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018744428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:04,496 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/bbed593ecce34e299ee64de9a1577630 2024-11-19T12:18:04,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/82565a22e1e547c28fd4b8a59f8e2440 is 50, key is test_row_0/C:col10/1732018682648/Put/seqid=0 2024-11-19T12:18:04,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741934_1110 (size=12301) 2024-11-19T12:18:04,918 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/82565a22e1e547c28fd4b8a59f8e2440 2024-11-19T12:18:04,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/8265ac4cfb79481587be7c429894cb14 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8265ac4cfb79481587be7c429894cb14 2024-11-19T12:18:04,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:04,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018744931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:04,938 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8265ac4cfb79481587be7c429894cb14, entries=150, sequenceid=461, filesize=12.0 K 2024-11-19T12:18:04,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/bbed593ecce34e299ee64de9a1577630 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/bbed593ecce34e299ee64de9a1577630 2024-11-19T12:18:04,944 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/bbed593ecce34e299ee64de9a1577630, entries=150, sequenceid=461, filesize=12.0 K 2024-11-19T12:18:04,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/82565a22e1e547c28fd4b8a59f8e2440 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/82565a22e1e547c28fd4b8a59f8e2440 2024-11-19T12:18:04,952 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/82565a22e1e547c28fd4b8a59f8e2440, entries=150, sequenceid=461, filesize=12.0 K 2024-11-19T12:18:04,953 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 1bd7f6746cebf2fb7e39737ab25d16cc in 1307ms, sequenceid=461, compaction requested=true 2024-11-19T12:18:04,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:04,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:04,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-19T12:18:04,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-19T12:18:04,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-19T12:18:04,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9220 sec 2024-11-19T12:18:04,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.9280 sec 2024-11-19T12:18:05,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-19T12:18:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:05,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-19T12:18:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:05,136 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-19T12:18:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:05,139 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:05,141 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-19T12:18:05,142 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:05,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-19T12:18:05,143 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:05,143 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:05,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:05,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/17f9fcd4743c495fb87ab836957103ea is 50, key is test_row_0/A:col10/1732018683817/Put/seqid=0 2024-11-19T12:18:05,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018745159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018745159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018745160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018745161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741935_1111 (size=12301) 2024-11-19T12:18:05,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/17f9fcd4743c495fb87ab836957103ea 2024-11-19T12:18:05,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/08273ae763d2454fa639dcd0f679485c is 50, key is test_row_0/B:col10/1732018683817/Put/seqid=0 2024-11-19T12:18:05,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741936_1112 (size=12301) 2024-11-19T12:18:05,239 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/08273ae763d2454fa639dcd0f679485c 2024-11-19T12:18:05,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-19T12:18:05,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b2063eb97fbd4225b2922fb018375559 is 50, key is test_row_0/C:col10/1732018683817/Put/seqid=0 2024-11-19T12:18:05,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018745264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018745265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018745265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018745265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741937_1113 (size=12301) 2024-11-19T12:18:05,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b2063eb97fbd4225b2922fb018375559 2024-11-19T12:18:05,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/17f9fcd4743c495fb87ab836957103ea as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17f9fcd4743c495fb87ab836957103ea 2024-11-19T12:18:05,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17f9fcd4743c495fb87ab836957103ea, entries=150, sequenceid=484, filesize=12.0 K 2024-11-19T12:18:05,296 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:05,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-19T12:18:05,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:05,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:05,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:18:05,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:05,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:05,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/08273ae763d2454fa639dcd0f679485c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/08273ae763d2454fa639dcd0f679485c 2024-11-19T12:18:05,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:05,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/08273ae763d2454fa639dcd0f679485c, entries=150, sequenceid=484, filesize=12.0 K
2024-11-19T12:18:05,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b2063eb97fbd4225b2922fb018375559 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b2063eb97fbd4225b2922fb018375559
2024-11-19T12:18:05,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b2063eb97fbd4225b2922fb018375559, entries=150, sequenceid=484, filesize=12.0 K
2024-11-19T12:18:05,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 1bd7f6746cebf2fb7e39737ab25d16cc in 191ms, sequenceid=484, compaction requested=true
2024-11-19T12:18:05,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc:
2024-11-19T12:18:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1
2024-11-19T12:18:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:18:05,327 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-19T12:18:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2
2024-11-19T12:18:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:18:05,327 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-19T12:18:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3
2024-11-19T12:18:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-19T12:18:05,329 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-19T12:18:05,329 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files)
2024-11-19T12:18:05,329 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:18:05,329 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/78f0db79d0db4d4a98bb90d00a1f7cbb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f72b1bbd16c44b6b84ea68e7a34d83fa, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/bbed593ecce34e299ee64de9a1577630, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/08273ae763d2454fa639dcd0f679485c] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=49.0 K
2024-11-19T12:18:05,330 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52632 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-19T12:18:05,330 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files)
2024-11-19T12:18:05,331 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.
2024-11-19T12:18:05,331 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6b75f1e4527943cab26d9daed87e1bd8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/c3e424d276ef4d50b74de13bd51380d9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8265ac4cfb79481587be7c429894cb14, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17f9fcd4743c495fb87ab836957103ea] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=51.4 K
2024-11-19T12:18:05,331 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 78f0db79d0db4d4a98bb90d00a1f7cbb, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1732018680999
2024-11-19T12:18:05,332 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b75f1e4527943cab26d9daed87e1bd8, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1732018680999
2024-11-19T12:18:05,332 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting f72b1bbd16c44b6b84ea68e7a34d83fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1732018681495
2024-11-19T12:18:05,333 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3e424d276ef4d50b74de13bd51380d9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1732018681495
2024-11-19T12:18:05,333 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting bbed593ecce34e299ee64de9a1577630, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732018682639
2024-11-19T12:18:05,334 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8265ac4cfb79481587be7c429894cb14, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732018682639
2024-11-19T12:18:05,334 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 08273ae763d2454fa639dcd0f679485c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1732018683813
2024-11-19T12:18:05,335 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17f9fcd4743c495fb87ab836957103ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1732018683813
2024-11-19T12:18:05,352 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#99 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:18:05,352 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#100 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:18:05,353 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/03efea52cd134160b2a2ea2e94beea14 is 50, key is test_row_0/A:col10/1732018683817/Put/seqid=0
2024-11-19T12:18:05,353 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/a496978e16c845d38bdf33e54136a023 is 50, key is test_row_0/B:col10/1732018683817/Put/seqid=0
2024-11-19T12:18:05,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741938_1114 (size=13425)
2024-11-19T12:18:05,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741939_1115 (size=13425)
2024-11-19T12:18:05,373 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/a496978e16c845d38bdf33e54136a023 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/a496978e16c845d38bdf33e54136a023
2024-11-19T12:18:05,382 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into a496978e16c845d38bdf33e54136a023(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T12:18:05,382 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:05,382 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=12, startTime=1732018685327; duration=0sec 2024-11-19T12:18:05,382 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:05,382 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B 2024-11-19T12:18:05,382 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:05,385 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:05,385 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files) 2024-11-19T12:18:05,385 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:05,386 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f05c7e8b68b744eeba259e81c202fbd0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/2a83737ca89047278d480f20591ee6b1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/82565a22e1e547c28fd4b8a59f8e2440, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b2063eb97fbd4225b2922fb018375559] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=49.0 K 2024-11-19T12:18:05,386 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting f05c7e8b68b744eeba259e81c202fbd0, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1732018680999 2024-11-19T12:18:05,387 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a83737ca89047278d480f20591ee6b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1732018681495 2024-11-19T12:18:05,387 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 82565a22e1e547c28fd4b8a59f8e2440, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=461, earliestPutTs=1732018682639 2024-11-19T12:18:05,388 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b2063eb97fbd4225b2922fb018375559, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1732018683813 2024-11-19T12:18:05,414 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#101 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:05,415 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/42f29ef5fd56468281876c7a2cd7586a is 50, key is test_row_0/C:col10/1732018683817/Put/seqid=0 2024-11-19T12:18:05,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741940_1116 (size=13425) 2024-11-19T12:18:05,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-19T12:18:05,449 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/42f29ef5fd56468281876c7a2cd7586a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/42f29ef5fd56468281876c7a2cd7586a 2024-11-19T12:18:05,450 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:05,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-19T12:18:05,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:18:05,451 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-19T12:18:05,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:05,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:05,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:05,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:05,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:05,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:05,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/7de2173a7fa9437ca09f3ae17396c4f1 is 50, key is test_row_0/A:col10/1732018685158/Put/seqid=0 2024-11-19T12:18:05,463 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into 42f29ef5fd56468281876c7a2cd7586a(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:05,463 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:05,463 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=12, startTime=1732018685327; duration=0sec 2024-11-19T12:18:05,464 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:05,464 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:18:05,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
as already flushing 2024-11-19T12:18:05,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:05,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741941_1117 (size=12301) 2024-11-19T12:18:05,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018745520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018745528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018745531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018745533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018745635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018745636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018745636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018745644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-19T12:18:05,778 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/03efea52cd134160b2a2ea2e94beea14 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/03efea52cd134160b2a2ea2e94beea14 2024-11-19T12:18:05,787 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into 03efea52cd134160b2a2ea2e94beea14(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:05,788 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:05,788 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=12, startTime=1732018685326; duration=0sec 2024-11-19T12:18:05,788 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:05,788 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A 2024-11-19T12:18:05,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018745844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018745844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018745853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018745853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,937 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/7de2173a7fa9437ca09f3ae17396c4f1 2024-11-19T12:18:05,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:05,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018745942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:05,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/78b85e41ad8449968fa19d2693362e42 is 50, key is test_row_0/B:col10/1732018685158/Put/seqid=0 2024-11-19T12:18:06,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741942_1118 (size=12301) 2024-11-19T12:18:06,005 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/78b85e41ad8449968fa19d2693362e42 2024-11-19T12:18:06,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/f0c8f66f79eb4c1c85cccc08d67efe25 is 50, key is test_row_0/C:col10/1732018685158/Put/seqid=0 2024-11-19T12:18:06,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741943_1119 (size=12301) 2024-11-19T12:18:06,140 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/f0c8f66f79eb4c1c85cccc08d67efe25 2024-11-19T12:18:06,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/7de2173a7fa9437ca09f3ae17396c4f1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/7de2173a7fa9437ca09f3ae17396c4f1 
2024-11-19T12:18:06,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018746148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:06,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018746156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:06,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018746157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:06,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018746159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:06,163 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/7de2173a7fa9437ca09f3ae17396c4f1, entries=150, sequenceid=498, filesize=12.0 K 2024-11-19T12:18:06,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/78b85e41ad8449968fa19d2693362e42 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/78b85e41ad8449968fa19d2693362e42 2024-11-19T12:18:06,180 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/78b85e41ad8449968fa19d2693362e42, entries=150, sequenceid=498, filesize=12.0 K 2024-11-19T12:18:06,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/f0c8f66f79eb4c1c85cccc08d67efe25 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f0c8f66f79eb4c1c85cccc08d67efe25 2024-11-19T12:18:06,187 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f0c8f66f79eb4c1c85cccc08d67efe25, entries=150, sequenceid=498, filesize=12.0 K 2024-11-19T12:18:06,200 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 1bd7f6746cebf2fb7e39737ab25d16cc in 749ms, sequenceid=498, compaction requested=false 2024-11-19T12:18:06,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:06,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:06,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-19T12:18:06,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-19T12:18:06,203 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-19T12:18:06,204 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0590 sec 2024-11-19T12:18:06,205 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.0640 sec 2024-11-19T12:18:06,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-19T12:18:06,250 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-19T12:18:06,252 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-19T12:18:06,254 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:06,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-19T12:18:06,254 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:06,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, 
state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:06,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-19T12:18:06,407 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:06,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-19T12:18:06,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:06,408 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-19T12:18:06,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:06,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:06,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:06,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:06,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:06,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:06,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/cd9c05f63f974c31a10aa9de294a8d30 is 50, key is test_row_0/A:col10/1732018685527/Put/seqid=0 2024-11-19T12:18:06,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741944_1120 (size=12301) 2024-11-19T12:18:06,455 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/cd9c05f63f974c31a10aa9de294a8d30 2024-11-19T12:18:06,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/92aa32d2c7f54ac5a73c4deb4f40990f is 50, key is test_row_0/B:col10/1732018685527/Put/seqid=0 2024-11-19T12:18:06,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741945_1121 (size=12301) 2024-11-19T12:18:06,504 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/92aa32d2c7f54ac5a73c4deb4f40990f 2024-11-19T12:18:06,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b74a74649a4f4ff2a8b803df48248892 is 50, key is test_row_0/C:col10/1732018685527/Put/seqid=0 2024-11-19T12:18:06,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741946_1122 (size=12301) 2024-11-19T12:18:06,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-19T12:18:06,572 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b74a74649a4f4ff2a8b803df48248892 2024-11-19T12:18:06,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/cd9c05f63f974c31a10aa9de294a8d30 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/cd9c05f63f974c31a10aa9de294a8d30 2024-11-19T12:18:06,615 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/cd9c05f63f974c31a10aa9de294a8d30, entries=150, sequenceid=523, filesize=12.0 K 2024-11-19T12:18:06,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/92aa32d2c7f54ac5a73c4deb4f40990f as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/92aa32d2c7f54ac5a73c4deb4f40990f 2024-11-19T12:18:06,632 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/92aa32d2c7f54ac5a73c4deb4f40990f, entries=150, sequenceid=523, filesize=12.0 K 2024-11-19T12:18:06,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b74a74649a4f4ff2a8b803df48248892 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b74a74649a4f4ff2a8b803df48248892 2024-11-19T12:18:06,642 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b74a74649a4f4ff2a8b803df48248892, entries=150, sequenceid=523, filesize=12.0 K 2024-11-19T12:18:06,643 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 1bd7f6746cebf2fb7e39737ab25d16cc in 234ms, sequenceid=523, compaction requested=true 2024-11-19T12:18:06,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:06,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
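The FlushTableProcedure / FlushRegionProcedure activity recorded above (pid=26 through pid=29) is what the master and region server log when a client explicitly asks for a table flush, as Thread-159 does here through HBaseAdmin. A minimal client-side equivalent, assuming a standard HBase 2.x Connection, is sketched below; the blocking call returns once the master reports the procedure finished, matching the "Operation: FLUSH ... completed" lines.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a flush procedure on the master and waits for it to complete,
          // which in turn dispatches FlushRegionCallable to the serving region server.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }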
2024-11-19T12:18:06,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-19T12:18:06,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-19T12:18:06,647 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-19T12:18:06,647 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 390 msec 2024-11-19T12:18:06,657 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 395 msec 2024-11-19T12:18:06,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:06,703 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:18:06,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:06,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:06,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:06,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:06,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:06,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:06,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/865034f0f38a4ed3ab541bd9b0e460ad is 50, key is test_row_0/A:col10/1732018686703/Put/seqid=0 2024-11-19T12:18:06,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018746813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:06,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018746814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:06,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741947_1123 (size=22065) 2024-11-19T12:18:06,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018746825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:06,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-19T12:18:06,857 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-19T12:18:06,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:06,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-19T12:18:06,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,860 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:06,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018746815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:06,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-19T12:18:06,864 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:06,864 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:06,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018746932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:06,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018746940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:06,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018746955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:06,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-19T12:18:06,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:06,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018746974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,023 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:07,025 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:07,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:07,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,026 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
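Both the RegionTooBusyException storm and the "NOT flushing ... as already flushing" rejections above trace back to the same knob: HRegion.checkResources blocks writes once the region memstore exceeds the flush size multiplied by the block multiplier, and the log shows that product is 512.0 K here, so flush requests arrive faster than the flushes can finish. The snippet below only illustrates how that limit is derived, assuming the standard configuration keys; the concrete values are chosen to reproduce the 512 K figure from the log and are not taken from the test's actual setup.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values: a 128 KB flush size with multiplier 4 gives the 512 KB
        // "Over memstore limit=512.0 K" blocking limit reported by HRegion.checkResources.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Puts block once the region memstore exceeds ~"
            + (flushSize * multiplier) + " bytes");
      }
    }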
2024-11-19T12:18:07,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018747153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018747153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018747159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-19T12:18:07,179 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:07,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:07,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:07,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:07,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018747180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,249 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/865034f0f38a4ed3ab541bd9b0e460ad 2024-11-19T12:18:07,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/acda45cfe510487bab1864841fd3a833 is 50, key is test_row_0/B:col10/1732018686703/Put/seqid=0 2024-11-19T12:18:07,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741948_1124 (size=12301) 2024-11-19T12:18:07,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/acda45cfe510487bab1864841fd3a833 2024-11-19T12:18:07,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/54d11889ff714ec3a6a94f1f453c416a is 50, key is test_row_0/C:col10/1732018686703/Put/seqid=0 2024-11-19T12:18:07,333 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:07,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:07,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:07,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,334 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:07,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741949_1125 (size=12301) 2024-11-19T12:18:07,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/54d11889ff714ec3a6a94f1f453c416a 2024-11-19T12:18:07,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/865034f0f38a4ed3ab541bd9b0e460ad as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/865034f0f38a4ed3ab541bd9b0e460ad 2024-11-19T12:18:07,372 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/865034f0f38a4ed3ab541bd9b0e460ad, entries=350, sequenceid=535, filesize=21.5 K 2024-11-19T12:18:07,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/acda45cfe510487bab1864841fd3a833 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/acda45cfe510487bab1864841fd3a833 2024-11-19T12:18:07,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/acda45cfe510487bab1864841fd3a833, entries=150, sequenceid=535, filesize=12.0 K 2024-11-19T12:18:07,428 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-19T12:18:07,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/54d11889ff714ec3a6a94f1f453c416a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/54d11889ff714ec3a6a94f1f453c416a 2024-11-19T12:18:07,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/54d11889ff714ec3a6a94f1f453c416a, entries=150, sequenceid=535, filesize=12.0 K 2024-11-19T12:18:07,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1bd7f6746cebf2fb7e39737ab25d16cc in 741ms, sequenceid=535, compaction requested=true 2024-11-19T12:18:07,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:07,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:07,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:07,445 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:07,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:07,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:07,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:07,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-19T12:18:07,445 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:07,446 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 60092 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:07,446 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files) 2024-11-19T12:18:07,447 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in 
TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,447 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/03efea52cd134160b2a2ea2e94beea14, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/7de2173a7fa9437ca09f3ae17396c4f1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/cd9c05f63f974c31a10aa9de294a8d30, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/865034f0f38a4ed3ab541bd9b0e460ad] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=58.7 K 2024-11-19T12:18:07,447 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03efea52cd134160b2a2ea2e94beea14, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1732018683813 2024-11-19T12:18:07,448 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7de2173a7fa9437ca09f3ae17396c4f1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=498, earliestPutTs=1732018685156 2024-11-19T12:18:07,449 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:07,449 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files) 2024-11-19T12:18:07,449 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:18:07,449 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/a496978e16c845d38bdf33e54136a023, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/78b85e41ad8449968fa19d2693362e42, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/92aa32d2c7f54ac5a73c4deb4f40990f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/acda45cfe510487bab1864841fd3a833] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=49.1 K 2024-11-19T12:18:07,449 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd9c05f63f974c31a10aa9de294a8d30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1732018685506 2024-11-19T12:18:07,450 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting a496978e16c845d38bdf33e54136a023, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1732018683813 2024-11-19T12:18:07,450 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 78b85e41ad8449968fa19d2693362e42, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=498, earliestPutTs=1732018685156 2024-11-19T12:18:07,450 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 865034f0f38a4ed3ab541bd9b0e460ad, keycount=350, bloomtype=ROW, size=21.5 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732018686655 2024-11-19T12:18:07,452 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 92aa32d2c7f54ac5a73c4deb4f40990f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1732018685506 2024-11-19T12:18:07,454 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting acda45cfe510487bab1864841fd3a833, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732018686679 2024-11-19T12:18:07,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-19T12:18:07,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:07,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-19T12:18:07,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:07,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:07,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING 
TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:07,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:07,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:07,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:07,475 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#111 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:07,476 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/98672f6138264f2e8da7971ddeafca88 is 50, key is test_row_0/B:col10/1732018686703/Put/seqid=0 2024-11-19T12:18:07,486 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/9bc89c62b9ed411cb4fe74addbf74f80 is 50, key is test_row_0/A:col10/1732018686814/Put/seqid=0 2024-11-19T12:18:07,489 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:07,490 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#113 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:07,491 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/8f01f4f636d243e89435369583b775a1 is 50, key is test_row_0/A:col10/1732018686703/Put/seqid=0 2024-11-19T12:18:07,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:07,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018747497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018747500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018747501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018747502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:18:07,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:07,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:07,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:07,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741951_1127 (size=14741) 2024-11-19T12:18:07,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=560 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/9bc89c62b9ed411cb4fe74addbf74f80 2024-11-19T12:18:07,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741950_1126 (size=13561) 2024-11-19T12:18:07,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741952_1128 (size=13561) 2024-11-19T12:18:07,590 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/98672f6138264f2e8da7971ddeafca88 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/98672f6138264f2e8da7971ddeafca88 2024-11-19T12:18:07,597 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into 98672f6138264f2e8da7971ddeafca88(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:07,597 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:07,597 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=12, startTime=1732018687445; duration=0sec 2024-11-19T12:18:07,597 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:07,597 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B 2024-11-19T12:18:07,597 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:07,599 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/8f01f4f636d243e89435369583b775a1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8f01f4f636d243e89435369583b775a1 2024-11-19T12:18:07,600 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 
after considering 3 permutations with 3 in ratio 2024-11-19T12:18:07,600 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files) 2024-11-19T12:18:07,600 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,600 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/42f29ef5fd56468281876c7a2cd7586a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f0c8f66f79eb4c1c85cccc08d67efe25, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b74a74649a4f4ff2a8b803df48248892, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/54d11889ff714ec3a6a94f1f453c416a] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=49.1 K 2024-11-19T12:18:07,601 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 42f29ef5fd56468281876c7a2cd7586a, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1732018683813 2024-11-19T12:18:07,602 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting f0c8f66f79eb4c1c85cccc08d67efe25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=498, earliestPutTs=1732018685156 2024-11-19T12:18:07,603 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b74a74649a4f4ff2a8b803df48248892, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1732018685506 2024-11-19T12:18:07,604 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 54d11889ff714ec3a6a94f1f453c416a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732018686679 2024-11-19T12:18:07,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018747602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,607 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into 8f01f4f636d243e89435369583b775a1(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:07,607 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:07,607 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=12, startTime=1732018687444; duration=0sec 2024-11-19T12:18:07,607 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:07,607 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A 2024-11-19T12:18:07,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018747604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018747608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/f47fce6b84314b24845e274d3b2fe000 is 50, key is test_row_0/B:col10/1732018686814/Put/seqid=0 2024-11-19T12:18:07,671 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#115 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:07,672 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/369e405d0c874bcb87489ecf86a972cd is 50, key is test_row_0/C:col10/1732018686703/Put/seqid=0 2024-11-19T12:18:07,676 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:07,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:07,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:07,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,680 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:07,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741954_1130 (size=13561) 2024-11-19T12:18:07,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741953_1129 (size=12301) 2024-11-19T12:18:07,733 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/369e405d0c874bcb87489ecf86a972cd as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/369e405d0c874bcb87489ecf86a972cd 2024-11-19T12:18:07,756 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into 369e405d0c874bcb87489ecf86a972cd(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:07,757 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:07,757 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=12, startTime=1732018687445; duration=0sec 2024-11-19T12:18:07,757 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:07,757 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:18:07,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018747811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018747828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018747831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,852 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:07,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:07,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:07,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:07,865 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:07,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:07,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41250 deadline: 1732018747959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-19T12:18:07,968 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., hostname=af314c41f984,36047,1732018661455, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:18:08,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:08,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1732018748018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:08,026 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:08,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:08,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:08,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:08,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018748124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:08,133 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=560 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/f47fce6b84314b24845e274d3b2fe000 2024-11-19T12:18:08,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018748138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:08,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018748134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:08,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b18d3853509f47d1b35bab087be6a2e2 is 50, key is test_row_0/C:col10/1732018686814/Put/seqid=0 2024-11-19T12:18:08,181 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:08,181 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:08,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:08,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741955_1131 (size=12301) 2024-11-19T12:18:08,339 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:08,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:08,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:08,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:08,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,492 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:08,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:08,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:08,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
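The interleaved RegionTooBusyException warnings above ("Over memstore limit=512.0 K") come from HRegion.checkResources: once a region's memstore grows past its blocking limit, new mutations are rejected until the flush in progress drains it. The blocking limit is the configured memstore flush size multiplied by the block multiplier. The snippet below only illustrates that relationship; the 128 KB flush size and multiplier of 4 are assumed values, chosen because they multiply out to the 512 KB limit reported here, not values read from this test's configuration.

```java
// Illustrative only: how the memstore blocking limit behind these warnings
// is derived from configuration (blocking limit = flush size * multiplier).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // assumed: flush at 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block updates at 4x flush size

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;                    // 512 KB with the values above
    System.out.println("Puts are rejected with RegionTooBusyException once the region"
        + " memstore exceeds " + blockingLimit + " bytes");
  }
}
```

With the production defaults (128 MB flush size, multiplier 4) the same check would not trip until a region's memstore reached 512 MB; a small limit like the one in this run forces the blocking path to be exercised under write load.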
2024-11-19T12:18:08,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=560 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b18d3853509f47d1b35bab087be6a2e2 2024-11-19T12:18:08,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/9bc89c62b9ed411cb4fe74addbf74f80 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/9bc89c62b9ed411cb4fe74addbf74f80 2024-11-19T12:18:08,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:08,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732018748627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:08,632 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/9bc89c62b9ed411cb4fe74addbf74f80, entries=200, sequenceid=560, filesize=14.4 K 2024-11-19T12:18:08,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/f47fce6b84314b24845e274d3b2fe000 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f47fce6b84314b24845e274d3b2fe000 2024-11-19T12:18:08,648 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f47fce6b84314b24845e274d3b2fe000, entries=150, sequenceid=560, filesize=12.0 K 2024-11-19T12:18:08,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/b18d3853509f47d1b35bab087be6a2e2 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b18d3853509f47d1b35bab087be6a2e2 2024-11-19T12:18:08,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:08,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b18d3853509f47d1b35bab087be6a2e2, entries=150, sequenceid=560, filesize=12.0 K 2024-11-19T12:18:08,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1732018748667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:08,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 1bd7f6746cebf2fb7e39737ab25d16cc in 1199ms, sequenceid=560, compaction requested=false 2024-11-19T12:18:08,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:08,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested 
on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:08,671 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:08,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-19T12:18:08,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:08,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:08,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:08,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:08,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:08,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:08,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:08,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:08,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
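The surrounding WARN entries show memstore backpressure: writes to region 1bd7f6746cebf2fb7e39737ab25d16cc are rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the flusher catches up. The HBase client normally retries this exception internally, but a caller that does surface it could back off along the lines below. This is a hypothetical sketch, not part of the test tool; the attempt count and sleep values are made up for illustration.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffPut {
  /** Retries a put a few times when the region reports memstore backpressure. */
  static void putWithBackoff(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long sleepMs = 100;                       // illustrative starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {    // "Over memstore limit", as in the log
          Thread.sleep(sleepMs);
          sleepMs *= 2;                         // exponential backoff before retrying
        }
      }
      throw new IOException("gave up after repeated RegionTooBusyException");
    }
  }
}
```

Backing off rather than hammering the region gives the MemStoreFlusher time to drain the memstore, which is exactly what the log shows happening between the rejected puts and the subsequent "Finished flush" entries.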
2024-11-19T12:18:08,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/807190dbe300451db6143cc5b3ea24ca is 50, key is test_row_0/A:col10/1732018687498/Put/seqid=0 2024-11-19T12:18:08,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018748796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:08,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741956_1132 (size=17181) 2024-11-19T12:18:08,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/807190dbe300451db6143cc5b3ea24ca 2024-11-19T12:18:08,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/5e6930021dd94f47932adaf6b2456c43 is 50, key is test_row_0/B:col10/1732018687498/Put/seqid=0 2024-11-19T12:18:08,827 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x62cfc6db to 127.0.0.1:64186 2024-11-19T12:18:08,827 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:08,828 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08b52656 to 127.0.0.1:64186 2024-11-19T12:18:08,828 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:08,829 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x634dc49c to 127.0.0.1:64186 2024-11-19T12:18:08,830 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:08,831 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x691cbc80 to 127.0.0.1:64186 2024-11-19T12:18:08,831 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:08,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741957_1133 (size=12301) 2024-11-19T12:18:08,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/5e6930021dd94f47932adaf6b2456c43 2024-11-19T12:18:08,847 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:08,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:08,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. as already flushing 2024-11-19T12:18:08,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:08,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/f36e7944edd347cfabb01f7dc6b47dfc is 50, key is test_row_0/C:col10/1732018687498/Put/seqid=0 2024-11-19T12:18:08,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741958_1134 (size=12301) 2024-11-19T12:18:08,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/f36e7944edd347cfabb01f7dc6b47dfc 2024-11-19T12:18:08,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:08,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41238 deadline: 1732018748900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:08,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/807190dbe300451db6143cc5b3ea24ca as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/807190dbe300451db6143cc5b3ea24ca 2024-11-19T12:18:08,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/807190dbe300451db6143cc5b3ea24ca, entries=250, sequenceid=577, filesize=16.8 K 2024-11-19T12:18:08,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/5e6930021dd94f47932adaf6b2456c43 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5e6930021dd94f47932adaf6b2456c43 2024-11-19T12:18:08,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5e6930021dd94f47932adaf6b2456c43, entries=150, sequenceid=577, filesize=12.0 K 2024-11-19T12:18:08,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/f36e7944edd347cfabb01f7dc6b47dfc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f36e7944edd347cfabb01f7dc6b47dfc 2024-11-19T12:18:08,952 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f36e7944edd347cfabb01f7dc6b47dfc, entries=150, sequenceid=577, filesize=12.0 K 2024-11-19T12:18:08,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 
KB/130530 for 1bd7f6746cebf2fb7e39737ab25d16cc in 284ms, sequenceid=577, compaction requested=true 2024-11-19T12:18:08,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:08,955 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:08,956 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45483 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:08,957 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/A is initiating minor compaction (all files) 2024-11-19T12:18:08,957 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/A in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,957 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8f01f4f636d243e89435369583b775a1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/9bc89c62b9ed411cb4fe74addbf74f80, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/807190dbe300451db6143cc5b3ea24ca] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=44.4 K 2024-11-19T12:18:08,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:08,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:08,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:08,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:08,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1bd7f6746cebf2fb7e39737ab25d16cc:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:08,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-19T12:18:08,957 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f01f4f636d243e89435369583b775a1, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732018686679 2024-11-19T12:18:08,958 DEBUG 
[RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bc89c62b9ed411cb4fe74addbf74f80, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=560, earliestPutTs=1732018686805 2024-11-19T12:18:08,958 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 807190dbe300451db6143cc5b3ea24ca, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1732018687486 2024-11-19T12:18:08,960 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:08,962 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:08,962 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/B is initiating minor compaction (all files) 2024-11-19T12:18:08,962 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/B in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:08,962 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/98672f6138264f2e8da7971ddeafca88, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f47fce6b84314b24845e274d3b2fe000, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5e6930021dd94f47932adaf6b2456c43] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=37.3 K 2024-11-19T12:18:08,962 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98672f6138264f2e8da7971ddeafca88, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732018686679 2024-11-19T12:18:08,963 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting f47fce6b84314b24845e274d3b2fe000, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=560, earliestPutTs=1732018686814 2024-11-19T12:18:08,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-19T12:18:08,968 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e6930021dd94f47932adaf6b2456c43, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1732018687498 2024-11-19T12:18:09,000 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:09,007 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#A#compaction#120 average throughput is 0.20 
MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:09,008 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/96a39ec02ce34535980fe169dcf7e8d6 is 50, key is test_row_0/A:col10/1732018687498/Put/seqid=0 2024-11-19T12:18:09,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-19T12:18:09,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:09,015 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-19T12:18:09,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:09,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:09,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:09,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:09,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:09,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:09,022 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#B#compaction#121 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:09,022 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/1c205ec374fd487d991ac1c8e8378048 is 50, key is test_row_0/B:col10/1732018687498/Put/seqid=0 2024-11-19T12:18:09,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:09,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
as already flushing 2024-11-19T12:18:09,024 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4aba57ed to 127.0.0.1:64186 2024-11-19T12:18:09,024 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:09,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/77a230c986524e13aa4e09ae55727db4 is 50, key is test_row_0/A:col10/1732018688771/Put/seqid=0 2024-11-19T12:18:09,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741959_1135 (size=13663) 2024-11-19T12:18:09,082 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/96a39ec02ce34535980fe169dcf7e8d6 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/96a39ec02ce34535980fe169dcf7e8d6 2024-11-19T12:18:09,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741960_1136 (size=13663) 2024-11-19T12:18:09,109 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05038857 to 127.0.0.1:64186 2024-11-19T12:18:09,109 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:09,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741961_1137 (size=12301) 2024-11-19T12:18:09,118 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/A of 1bd7f6746cebf2fb7e39737ab25d16cc into 96a39ec02ce34535980fe169dcf7e8d6(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
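The long/short compaction threads here are executing the ExploringCompactionPolicy's selections: for each column family, three store files (roughly 37-44 K in total) are rewritten into a single ~13.3 K file. Compactions can also be requested and observed from a client; the sketch below is illustrative only, assumes an HBase 2.x style Admin API, and is not how this test drives compaction (here the region server schedules it itself after the flush).

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;

public class CompactExample {
  /** Requests a compaction and crudely polls until the servers report it is done. */
  static void compactAndWait(Admin admin, TableName tn) throws Exception {
    admin.compact(tn);                                    // queue a (minor) compaction request
    while (admin.getCompactionState(tn) != CompactionState.NONE) {
      Thread.sleep(500);                                  // simple polling; fine for a sketch
    }
  }
}
```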
2024-11-19T12:18:09,118 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:09,118 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/A, priority=13, startTime=1732018688955; duration=0sec 2024-11-19T12:18:09,118 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:09,118 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:A 2024-11-19T12:18:09,119 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:09,120 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/1c205ec374fd487d991ac1c8e8378048 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1c205ec374fd487d991ac1c8e8378048 2024-11-19T12:18:09,120 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:09,121 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 1bd7f6746cebf2fb7e39737ab25d16cc/C is initiating minor compaction (all files) 2024-11-19T12:18:09,121 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1bd7f6746cebf2fb7e39737ab25d16cc/C in TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:18:09,121 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/369e405d0c874bcb87489ecf86a972cd, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b18d3853509f47d1b35bab087be6a2e2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f36e7944edd347cfabb01f7dc6b47dfc] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp, totalSize=37.3 K 2024-11-19T12:18:09,122 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 369e405d0c874bcb87489ecf86a972cd, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732018686679 2024-11-19T12:18:09,123 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b18d3853509f47d1b35bab087be6a2e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=560, earliestPutTs=1732018686814 2024-11-19T12:18:09,126 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting f36e7944edd347cfabb01f7dc6b47dfc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1732018687498 2024-11-19T12:18:09,134 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/B of 1bd7f6746cebf2fb7e39737ab25d16cc into 1c205ec374fd487d991ac1c8e8378048(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:09,134 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:09,134 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/B, priority=13, startTime=1732018688957; duration=0sec 2024-11-19T12:18:09,134 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:09,134 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:B 2024-11-19T12:18:09,145 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1bd7f6746cebf2fb7e39737ab25d16cc#C#compaction#123 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:09,146 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/6a5bcbf5178543ebaefdd4560b9a192b is 50, key is test_row_0/C:col10/1732018687498/Put/seqid=0 2024-11-19T12:18:09,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741962_1138 (size=13663) 2024-11-19T12:18:09,217 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/6a5bcbf5178543ebaefdd4560b9a192b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6a5bcbf5178543ebaefdd4560b9a192b 2024-11-19T12:18:09,227 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1bd7f6746cebf2fb7e39737ab25d16cc/C of 1bd7f6746cebf2fb7e39737ab25d16cc into 6a5bcbf5178543ebaefdd4560b9a192b(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:09,227 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:09,227 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc., storeName=1bd7f6746cebf2fb7e39737ab25d16cc/C, priority=13, startTime=1732018688957; duration=0sec 2024-11-19T12:18:09,227 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:09,227 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1bd7f6746cebf2fb7e39737ab25d16cc:C 2024-11-19T12:18:09,517 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=599 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/77a230c986524e13aa4e09ae55727db4 2024-11-19T12:18:09,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6dafb93b871b4884917e5f7a4bafff83 is 50, key is test_row_0/B:col10/1732018688771/Put/seqid=0 2024-11-19T12:18:09,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741963_1139 (size=12301) 2024-11-19T12:18:09,555 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=42.49 KB at sequenceid=599 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6dafb93b871b4884917e5f7a4bafff83 2024-11-19T12:18:09,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/61a7beb3b9444f0d8c390d26ec4152b2 is 50, key is test_row_0/C:col10/1732018688771/Put/seqid=0 2024-11-19T12:18:09,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741964_1140 (size=12301) 2024-11-19T12:18:09,647 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x629b91f8 to 127.0.0.1:64186 2024-11-19T12:18:09,647 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:09,677 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62b16227 to 127.0.0.1:64186 2024-11-19T12:18:09,677 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:09,927 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T12:18:10,000 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=599 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/61a7beb3b9444f0d8c390d26ec4152b2 2024-11-19T12:18:10,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/77a230c986524e13aa4e09ae55727db4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/77a230c986524e13aa4e09ae55727db4 2024-11-19T12:18:10,026 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/77a230c986524e13aa4e09ae55727db4, entries=150, sequenceid=599, filesize=12.0 K 2024-11-19T12:18:10,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/6dafb93b871b4884917e5f7a4bafff83 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6dafb93b871b4884917e5f7a4bafff83 2024-11-19T12:18:10,034 INFO 
[RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6dafb93b871b4884917e5f7a4bafff83, entries=150, sequenceid=599, filesize=12.0 K 2024-11-19T12:18:10,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/61a7beb3b9444f0d8c390d26ec4152b2 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/61a7beb3b9444f0d8c390d26ec4152b2 2024-11-19T12:18:10,043 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/61a7beb3b9444f0d8c390d26ec4152b2, entries=150, sequenceid=599, filesize=12.0 K 2024-11-19T12:18:10,044 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=26.84 KB/27480 for 1bd7f6746cebf2fb7e39737ab25d16cc in 1029ms, sequenceid=599, compaction requested=false 2024-11-19T12:18:10,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:10,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
2024-11-19T12:18:10,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-19T12:18:10,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-19T12:18:10,047 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-19T12:18:10,047 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1820 sec 2024-11-19T12:18:10,049 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 3.1890 sec 2024-11-19T12:18:10,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-19T12:18:10,967 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-19T12:18:11,998 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53f30e40 to 127.0.0.1:64186 2024-11-19T12:18:11,998 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 117 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 96 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5933 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5927 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2625 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7874 rows 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2641 2024-11-19T12:18:11,999 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7923 rows 2024-11-19T12:18:11,999 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-19T12:18:11,999 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6be4168e to 127.0.0.1:64186 2024-11-19T12:18:12,000 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:12,007 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-19T12:18:12,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-19T12:18:12,019 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:12,023 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018692022"}]},"ts":"1732018692022"} 2024-11-19T12:18:12,024 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-19T12:18:12,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-19T12:18:12,041 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-19T12:18:12,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-19T12:18:12,048 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1bd7f6746cebf2fb7e39737ab25d16cc, UNASSIGN}] 2024-11-19T12:18:12,049 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1bd7f6746cebf2fb7e39737ab25d16cc, UNASSIGN 2024-11-19T12:18:12,052 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=1bd7f6746cebf2fb7e39737ab25d16cc, regionState=CLOSING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:18:12,056 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-19T12:18:12,056 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; CloseRegionProcedure 1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:18:12,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-19T12:18:12,218 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:12,220 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(124): Close 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:12,220 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-19T12:18:12,221 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1681): Closing 1bd7f6746cebf2fb7e39737ab25d16cc, disabling compactions & flushes 2024-11-19T12:18:12,221 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 
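At this point the client asks the master to disable the table, which fans out into CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure for region 1bd7f6746cebf2fb7e39737ab25d16cc. A hedged sketch of the equivalent client-side teardown follows; the deleteTable step is an assumption about a typical test cleanup and does not appear in this excerpt:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.isTableEnabled(table)) {
            // Drives the DisableTableProcedure seen above (pid=32).
            admin.disableTable(table);
          }
          // Assumed cleanup step for a test teardown; not shown in this log excerpt.
          admin.deleteTable(table);
        }
      }
    }
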
2024-11-19T12:18:12,221 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:12,222 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. after waiting 0 ms 2024-11-19T12:18:12,222 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:12,222 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(2837): Flushing 1bd7f6746cebf2fb7e39737ab25d16cc 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-19T12:18:12,222 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=A 2024-11-19T12:18:12,222 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:12,222 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=B 2024-11-19T12:18:12,222 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:12,222 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1bd7f6746cebf2fb7e39737ab25d16cc, store=C 2024-11-19T12:18:12,223 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:12,234 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/18bda39fa450409f9dfb78e297bd96ac is 50, key is test_row_0/A:col10/1732018689023/Put/seqid=0 2024-11-19T12:18:12,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741965_1141 (size=12301) 2024-11-19T12:18:12,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-19T12:18:12,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-19T12:18:12,647 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=610 (bloomFilter=true), 
to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/18bda39fa450409f9dfb78e297bd96ac 2024-11-19T12:18:12,662 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/af94b2f02bb34a09af0f0642bf6c867b is 50, key is test_row_0/B:col10/1732018689023/Put/seqid=0 2024-11-19T12:18:12,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741966_1142 (size=12301) 2024-11-19T12:18:13,079 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=610 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/af94b2f02bb34a09af0f0642bf6c867b 2024-11-19T12:18:13,100 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/af5d336efcb3481d85970916645c3bc2 is 50, key is test_row_0/C:col10/1732018689023/Put/seqid=0 2024-11-19T12:18:13,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741967_1143 (size=12301) 2024-11-19T12:18:13,129 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=610 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/af5d336efcb3481d85970916645c3bc2 2024-11-19T12:18:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-19T12:18:13,144 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/A/18bda39fa450409f9dfb78e297bd96ac as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/18bda39fa450409f9dfb78e297bd96ac 2024-11-19T12:18:13,161 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/18bda39fa450409f9dfb78e297bd96ac, entries=150, sequenceid=610, filesize=12.0 K 2024-11-19T12:18:13,162 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/B/af94b2f02bb34a09af0f0642bf6c867b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/af94b2f02bb34a09af0f0642bf6c867b 2024-11-19T12:18:13,169 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/af94b2f02bb34a09af0f0642bf6c867b, entries=150, sequenceid=610, filesize=12.0 K 2024-11-19T12:18:13,175 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/.tmp/C/af5d336efcb3481d85970916645c3bc2 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/af5d336efcb3481d85970916645c3bc2 2024-11-19T12:18:13,194 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/af5d336efcb3481d85970916645c3bc2, entries=150, sequenceid=610, filesize=12.0 K 2024-11-19T12:18:13,198 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 1bd7f6746cebf2fb7e39737ab25d16cc in 976ms, sequenceid=610, compaction requested=true 2024-11-19T12:18:13,199 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/783c515d094a4e2fb459ce8ff150d198, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/74505db564fe4a98991f64e751d8dcad, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/ceb87a3d466b4f358424f2bfbbcc4b51, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/d97c68e66de44def9477ac926d6bf4a9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5b068f700dab4b9bbad0a32d44ae2e10, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6b5f00b248dd439ab392c243b2822e8d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3c16fa9d72ba4135983424e521e9ced1, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/898b9cf3d29047f58a37ba2422954dab, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/1783bfa2412044af943fd8f8eed7a285, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/aa0e7458b7fc4b58bd7479cbe9cb487f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/4d1e8380e83a44c48ad306a454a3dbe9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3cb386632e524b7f90494fa82f354c00, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/a3ed39b837204d82a70505ff283d8db5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17bd7e1610b4418d881e75be3aa2430d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/34a53df7fd594c3b94f3bb70fd330ffb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8e3ce5e460224431916b1e5dd041ac9e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/560edcc1c8ab4c9e9982bb58b8d36602, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3fab50c1cc1249c7b25b5b16e1a01753, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6ac971f46e084fc096f24a7afa6f299f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/11ec8c4b53af4e4f84c859de910c9fe4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/b851c34ae32d447c95655486a120d842, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/32c8280e56504c8d97cae1793a6b5d2e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/36396cea97fa41f2b108f0296cac2a34, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/616ad125d5a947b68e9406ae0aa11c2d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/d95285061bcc4b34b50c1584fa32e5f8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/dd433a8092c34b89a5e0cbbbba3d4e56, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/acba55aede0b43bbb0c3aabbbce90c26, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5c19c5ca8d774109b28365bf4174d2fc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6b75f1e4527943cab26d9daed87e1bd8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/ad57c15442a64e32a0ebab963f0bdf0d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/c3e424d276ef4d50b74de13bd51380d9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8265ac4cfb79481587be7c429894cb14, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/03efea52cd134160b2a2ea2e94beea14, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17f9fcd4743c495fb87ab836957103ea, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/7de2173a7fa9437ca09f3ae17396c4f1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/cd9c05f63f974c31a10aa9de294a8d30, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/865034f0f38a4ed3ab541bd9b0e460ad, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8f01f4f636d243e89435369583b775a1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/9bc89c62b9ed411cb4fe74addbf74f80, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/807190dbe300451db6143cc5b3ea24ca] to archive 2024-11-19T12:18:13,203 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
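On close, HStore hands its compacted store files to HFileArchiver, which moves them from the data/default/TestAcidGuarantees/... tree into the parallel archive/data/... tree instead of deleting them, as the entries below show. A rough sketch of inspecting that archive layout with the plain Hadoop FileSystem API (the path is copied from this log; on a real cluster it would differ):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Archive location mirrors the data/ layout; path taken from the log above.
        Path archiveDir = new Path("hdfs://localhost:46379/user/jenkins/test-data/"
            + "59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/"
            + "1bd7f6746cebf2fb7e39737ab25d16cc/A");
        FileSystem fs = archiveDir.getFileSystem(conf);
        for (FileStatus f : fs.listStatus(archiveDir)) {
          System.out.println(f.getPath().getName() + " " + f.getLen() + " bytes");
        }
      }
    }
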
2024-11-19T12:18:13,222 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/783c515d094a4e2fb459ce8ff150d198 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/783c515d094a4e2fb459ce8ff150d198 2024-11-19T12:18:13,230 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/74505db564fe4a98991f64e751d8dcad to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/74505db564fe4a98991f64e751d8dcad 2024-11-19T12:18:13,232 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/ceb87a3d466b4f358424f2bfbbcc4b51 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/ceb87a3d466b4f358424f2bfbbcc4b51 2024-11-19T12:18:13,239 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/d97c68e66de44def9477ac926d6bf4a9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/d97c68e66de44def9477ac926d6bf4a9 2024-11-19T12:18:13,241 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5b068f700dab4b9bbad0a32d44ae2e10 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5b068f700dab4b9bbad0a32d44ae2e10 2024-11-19T12:18:13,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6b5f00b248dd439ab392c243b2822e8d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6b5f00b248dd439ab392c243b2822e8d 2024-11-19T12:18:13,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3c16fa9d72ba4135983424e521e9ced1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3c16fa9d72ba4135983424e521e9ced1 2024-11-19T12:18:13,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/898b9cf3d29047f58a37ba2422954dab to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/898b9cf3d29047f58a37ba2422954dab 2024-11-19T12:18:13,258 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/1783bfa2412044af943fd8f8eed7a285 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/1783bfa2412044af943fd8f8eed7a285 2024-11-19T12:18:13,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/aa0e7458b7fc4b58bd7479cbe9cb487f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/aa0e7458b7fc4b58bd7479cbe9cb487f 2024-11-19T12:18:13,273 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/4d1e8380e83a44c48ad306a454a3dbe9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/4d1e8380e83a44c48ad306a454a3dbe9 2024-11-19T12:18:13,275 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3cb386632e524b7f90494fa82f354c00 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3cb386632e524b7f90494fa82f354c00 2024-11-19T12:18:13,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/a3ed39b837204d82a70505ff283d8db5 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/a3ed39b837204d82a70505ff283d8db5 2024-11-19T12:18:13,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17bd7e1610b4418d881e75be3aa2430d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17bd7e1610b4418d881e75be3aa2430d 2024-11-19T12:18:13,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/34a53df7fd594c3b94f3bb70fd330ffb to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/34a53df7fd594c3b94f3bb70fd330ffb 2024-11-19T12:18:13,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8e3ce5e460224431916b1e5dd041ac9e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8e3ce5e460224431916b1e5dd041ac9e 2024-11-19T12:18:13,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/560edcc1c8ab4c9e9982bb58b8d36602 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/560edcc1c8ab4c9e9982bb58b8d36602 2024-11-19T12:18:13,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3fab50c1cc1249c7b25b5b16e1a01753 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/3fab50c1cc1249c7b25b5b16e1a01753 2024-11-19T12:18:13,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6ac971f46e084fc096f24a7afa6f299f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6ac971f46e084fc096f24a7afa6f299f 2024-11-19T12:18:13,312 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/11ec8c4b53af4e4f84c859de910c9fe4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/11ec8c4b53af4e4f84c859de910c9fe4 2024-11-19T12:18:13,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/b851c34ae32d447c95655486a120d842 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/b851c34ae32d447c95655486a120d842 2024-11-19T12:18:13,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/32c8280e56504c8d97cae1793a6b5d2e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/32c8280e56504c8d97cae1793a6b5d2e 2024-11-19T12:18:13,315 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/36396cea97fa41f2b108f0296cac2a34 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/36396cea97fa41f2b108f0296cac2a34 2024-11-19T12:18:13,317 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/616ad125d5a947b68e9406ae0aa11c2d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/616ad125d5a947b68e9406ae0aa11c2d 2024-11-19T12:18:13,318 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/d95285061bcc4b34b50c1584fa32e5f8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/d95285061bcc4b34b50c1584fa32e5f8 2024-11-19T12:18:13,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/dd433a8092c34b89a5e0cbbbba3d4e56 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/dd433a8092c34b89a5e0cbbbba3d4e56 2024-11-19T12:18:13,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/acba55aede0b43bbb0c3aabbbce90c26 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/acba55aede0b43bbb0c3aabbbce90c26 2024-11-19T12:18:13,321 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5c19c5ca8d774109b28365bf4174d2fc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/5c19c5ca8d774109b28365bf4174d2fc 2024-11-19T12:18:13,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6b75f1e4527943cab26d9daed87e1bd8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/6b75f1e4527943cab26d9daed87e1bd8 2024-11-19T12:18:13,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/ad57c15442a64e32a0ebab963f0bdf0d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/ad57c15442a64e32a0ebab963f0bdf0d 2024-11-19T12:18:13,326 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/c3e424d276ef4d50b74de13bd51380d9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/c3e424d276ef4d50b74de13bd51380d9 2024-11-19T12:18:13,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8265ac4cfb79481587be7c429894cb14 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8265ac4cfb79481587be7c429894cb14 2024-11-19T12:18:13,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/03efea52cd134160b2a2ea2e94beea14 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/03efea52cd134160b2a2ea2e94beea14 2024-11-19T12:18:13,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17f9fcd4743c495fb87ab836957103ea to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/17f9fcd4743c495fb87ab836957103ea 2024-11-19T12:18:13,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/7de2173a7fa9437ca09f3ae17396c4f1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/7de2173a7fa9437ca09f3ae17396c4f1 2024-11-19T12:18:13,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/cd9c05f63f974c31a10aa9de294a8d30 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/cd9c05f63f974c31a10aa9de294a8d30 2024-11-19T12:18:13,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/865034f0f38a4ed3ab541bd9b0e460ad to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/865034f0f38a4ed3ab541bd9b0e460ad 2024-11-19T12:18:13,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8f01f4f636d243e89435369583b775a1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/8f01f4f636d243e89435369583b775a1 2024-11-19T12:18:13,383 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/9bc89c62b9ed411cb4fe74addbf74f80 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/9bc89c62b9ed411cb4fe74addbf74f80 2024-11-19T12:18:13,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/807190dbe300451db6143cc5b3ea24ca to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/807190dbe300451db6143cc5b3ea24ca 2024-11-19T12:18:13,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/26a15a033f314009b8b3785cdcc2f02c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1cf15817c24d4032b362bd444e91f4e1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d01fb2113adc4b71b71dd560fae54dbc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/aa81ccf311ab4d1e9c2cb93da0c69d1d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6d0ea21eff2a423fb9698c569926954a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/010a0990052b49f6b6a77762ff174ff8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1d7570e0543d4c1482bf693fc3e91a85, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4fac8d5a36134c948cd79323755ff097, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/e08a7cbd6ce6481dbe21d46e598bff0b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/ee99d9253b1e45b99000aa8291857f21, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/693d04812c8f471694adb0d658ca5d9d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1dc6e049f5464fcea976198dd8b4895c, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/478eaf48189646e3ab1e23344f0d1290, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d1c510e2d9d14f308340bb5b6ec83904, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/785249d5693f444fbf0f8387d3fe248e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4ddea4c545074efa9b27c965948c993f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6e0374ecb4594a18b4a0d43085d6fe20, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/b1d38b7bfccb4d46a5ce20d43840124b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/115fe210146f4365abe89fd3bac409c7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/af3a463fc19a44288cf2891c71784fa1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/c29c4a27514e474fae79deb4d5f671b8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/55fb5358a12443cab338ebac05033ad1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4e5feb2231384545889a166b165547d6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/de46c0f65a554be6b7ab3890c9f99825, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/e94b6f4ab75e47688f13adf66ca6718c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/a6b069a5389f4331831115322b9018cd, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d784fd79aa4340edb7dc8386fbb2e633, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5441207f55984f35abffa611dcb23310, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/78f0db79d0db4d4a98bb90d00a1f7cbb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6a782fcb3f2c4159af1723ffd2742b0c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f72b1bbd16c44b6b84ea68e7a34d83fa, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/bbed593ecce34e299ee64de9a1577630, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/a496978e16c845d38bdf33e54136a023, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/08273ae763d2454fa639dcd0f679485c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/78b85e41ad8449968fa19d2693362e42, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/92aa32d2c7f54ac5a73c4deb4f40990f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/98672f6138264f2e8da7971ddeafca88, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/acda45cfe510487bab1864841fd3a833, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f47fce6b84314b24845e274d3b2fe000, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5e6930021dd94f47932adaf6b2456c43] to archive 2024-11-19T12:18:13,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T12:18:13,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/26a15a033f314009b8b3785cdcc2f02c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/26a15a033f314009b8b3785cdcc2f02c 2024-11-19T12:18:13,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1cf15817c24d4032b362bd444e91f4e1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1cf15817c24d4032b362bd444e91f4e1 2024-11-19T12:18:13,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d01fb2113adc4b71b71dd560fae54dbc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d01fb2113adc4b71b71dd560fae54dbc 2024-11-19T12:18:13,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/aa81ccf311ab4d1e9c2cb93da0c69d1d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/aa81ccf311ab4d1e9c2cb93da0c69d1d 2024-11-19T12:18:13,436 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6d0ea21eff2a423fb9698c569926954a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6d0ea21eff2a423fb9698c569926954a 2024-11-19T12:18:13,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/010a0990052b49f6b6a77762ff174ff8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/010a0990052b49f6b6a77762ff174ff8 2024-11-19T12:18:13,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1d7570e0543d4c1482bf693fc3e91a85 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1d7570e0543d4c1482bf693fc3e91a85 2024-11-19T12:18:13,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4fac8d5a36134c948cd79323755ff097 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4fac8d5a36134c948cd79323755ff097 2024-11-19T12:18:13,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/e08a7cbd6ce6481dbe21d46e598bff0b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/e08a7cbd6ce6481dbe21d46e598bff0b 2024-11-19T12:18:13,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/ee99d9253b1e45b99000aa8291857f21 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/ee99d9253b1e45b99000aa8291857f21 2024-11-19T12:18:13,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/693d04812c8f471694adb0d658ca5d9d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/693d04812c8f471694adb0d658ca5d9d 2024-11-19T12:18:13,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1dc6e049f5464fcea976198dd8b4895c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1dc6e049f5464fcea976198dd8b4895c 2024-11-19T12:18:13,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/478eaf48189646e3ab1e23344f0d1290 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/478eaf48189646e3ab1e23344f0d1290 2024-11-19T12:18:13,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d1c510e2d9d14f308340bb5b6ec83904 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d1c510e2d9d14f308340bb5b6ec83904 2024-11-19T12:18:13,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/785249d5693f444fbf0f8387d3fe248e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/785249d5693f444fbf0f8387d3fe248e 2024-11-19T12:18:13,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4ddea4c545074efa9b27c965948c993f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4ddea4c545074efa9b27c965948c993f 2024-11-19T12:18:13,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6e0374ecb4594a18b4a0d43085d6fe20 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6e0374ecb4594a18b4a0d43085d6fe20 2024-11-19T12:18:13,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/b1d38b7bfccb4d46a5ce20d43840124b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/b1d38b7bfccb4d46a5ce20d43840124b 2024-11-19T12:18:13,481 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/115fe210146f4365abe89fd3bac409c7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/115fe210146f4365abe89fd3bac409c7 2024-11-19T12:18:13,482 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/af3a463fc19a44288cf2891c71784fa1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/af3a463fc19a44288cf2891c71784fa1 2024-11-19T12:18:13,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/c29c4a27514e474fae79deb4d5f671b8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/c29c4a27514e474fae79deb4d5f671b8 2024-11-19T12:18:13,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/55fb5358a12443cab338ebac05033ad1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/55fb5358a12443cab338ebac05033ad1 2024-11-19T12:18:13,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4e5feb2231384545889a166b165547d6 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/4e5feb2231384545889a166b165547d6 2024-11-19T12:18:13,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/de46c0f65a554be6b7ab3890c9f99825 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/de46c0f65a554be6b7ab3890c9f99825 2024-11-19T12:18:13,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/e94b6f4ab75e47688f13adf66ca6718c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/e94b6f4ab75e47688f13adf66ca6718c 2024-11-19T12:18:13,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/a6b069a5389f4331831115322b9018cd to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/a6b069a5389f4331831115322b9018cd 2024-11-19T12:18:13,516 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d784fd79aa4340edb7dc8386fbb2e633 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/d784fd79aa4340edb7dc8386fbb2e633 2024-11-19T12:18:13,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5441207f55984f35abffa611dcb23310 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5441207f55984f35abffa611dcb23310 2024-11-19T12:18:13,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/78f0db79d0db4d4a98bb90d00a1f7cbb to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/78f0db79d0db4d4a98bb90d00a1f7cbb 2024-11-19T12:18:13,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6a782fcb3f2c4159af1723ffd2742b0c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6a782fcb3f2c4159af1723ffd2742b0c 2024-11-19T12:18:13,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f72b1bbd16c44b6b84ea68e7a34d83fa to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f72b1bbd16c44b6b84ea68e7a34d83fa 2024-11-19T12:18:13,526 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/bbed593ecce34e299ee64de9a1577630 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/bbed593ecce34e299ee64de9a1577630 2024-11-19T12:18:13,527 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/a496978e16c845d38bdf33e54136a023 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/a496978e16c845d38bdf33e54136a023 2024-11-19T12:18:13,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/08273ae763d2454fa639dcd0f679485c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/08273ae763d2454fa639dcd0f679485c 2024-11-19T12:18:13,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/78b85e41ad8449968fa19d2693362e42 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/78b85e41ad8449968fa19d2693362e42 2024-11-19T12:18:13,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/92aa32d2c7f54ac5a73c4deb4f40990f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/92aa32d2c7f54ac5a73c4deb4f40990f 2024-11-19T12:18:13,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/98672f6138264f2e8da7971ddeafca88 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/98672f6138264f2e8da7971ddeafca88 2024-11-19T12:18:13,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/acda45cfe510487bab1864841fd3a833 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/acda45cfe510487bab1864841fd3a833 2024-11-19T12:18:13,546 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f47fce6b84314b24845e274d3b2fe000 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/f47fce6b84314b24845e274d3b2fe000 2024-11-19T12:18:13,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5e6930021dd94f47932adaf6b2456c43 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/5e6930021dd94f47932adaf6b2456c43 2024-11-19T12:18:13,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/1f73f7ddbb594cef974bcc826565a7f7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b39f1927423742efacd940c933faa1bd, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/5216d19ebfe94fcf8cf4c0fc129c25ce, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b6d8baadeac24b21b05c31f7f6121db6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/381d13661c6d48afaa1f2bf29e554161, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/8b38b008ccf744c08ef2a545024bd0e9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b613eba33c0b469ab4c68e3ac3c237d9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/75cdb9f3f0e349b6b541f482e9433b70, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/0f7246f61fee4bb283b30488cca79295, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/3b784d65742d42c7acac4e72d9a06439, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/873eae9d835b4a80a292a247cfe22c87, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/27b4fea7debc41559d69293e8b0f0e01, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b9edb644a3e345ceaa96f628a617b4ea, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/c39b3ffe81b946ed935cb47fc164681d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/86f656ea955d43e486a4a39223a945f6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/a7673bbd886a4dcfb5332818f70a8c8e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/5e6d5a9d39a648e1a8e00eece4b00c78, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/fde937863610463f8180f1938181d1e5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/d8f96c7825e64689a1c83b6defe3b8a2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/3402f6ff70ac4240bec0b7a9827837e5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/30491dae26ce4bceb7b6c36ea614ce13, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/a532dc47a48e4995a6e22d7f5057485d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/84a0883cae4c4648a531c67fc23b55c7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6e8dd928f2b8486cba39eda5710763f8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f55055bfa54a41068da6c794301c2ce2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/93783d132885471480c890b6f0bb0ff1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6ed2fc24565c431188653f21e59766af, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/38f2b63b3e2f4b60a65d26a8efa30c74, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f05c7e8b68b744eeba259e81c202fbd0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/cc4994b5b9f34e9e8843455fb67762d9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/2a83737ca89047278d480f20591ee6b1, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/82565a22e1e547c28fd4b8a59f8e2440, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/42f29ef5fd56468281876c7a2cd7586a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b2063eb97fbd4225b2922fb018375559, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f0c8f66f79eb4c1c85cccc08d67efe25, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b74a74649a4f4ff2a8b803df48248892, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/369e405d0c874bcb87489ecf86a972cd, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/54d11889ff714ec3a6a94f1f453c416a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b18d3853509f47d1b35bab087be6a2e2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f36e7944edd347cfabb01f7dc6b47dfc] to archive 2024-11-19T12:18:13,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
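[editor's note] The same archiving pass now repeats for the C family. As a hedged sketch of what a single move looks like at the filesystem level, the snippet below uses the public Hadoop FileSystem API; it is not HBase's internal HFileArchiver, and the two paths are supplied as placeholders of the kind shown in this log.

    // Minimal sketch of moving one compacted store file into the archive directory.
    // Uses the public Hadoop FileSystem API; this is NOT HBase's HFileArchiver,
    // and the two command-line paths stand in for the paths seen in this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MoveToArchive {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();   // would carry fs.defaultFS for the test cluster
            FileSystem fs = FileSystem.get(conf);

            Path src = new Path(args[0]);               // e.g. a .../data/default/<table>/<region>/C/<file> path
            Path dst = new Path(args[1]);               // the matching .../archive/data/default/... path

            fs.mkdirs(dst.getParent());                 // ensure the archive family directory exists
            if (!fs.rename(src, dst)) {                 // rename is a metadata-only move within one HDFS
                throw new java.io.IOException("could not archive " + src + " to " + dst);
            }
        }
    }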
2024-11-19T12:18:13,580 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/1f73f7ddbb594cef974bcc826565a7f7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/1f73f7ddbb594cef974bcc826565a7f7 2024-11-19T12:18:13,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b39f1927423742efacd940c933faa1bd to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b39f1927423742efacd940c933faa1bd 2024-11-19T12:18:13,588 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/5216d19ebfe94fcf8cf4c0fc129c25ce to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/5216d19ebfe94fcf8cf4c0fc129c25ce 2024-11-19T12:18:13,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b6d8baadeac24b21b05c31f7f6121db6 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b6d8baadeac24b21b05c31f7f6121db6 2024-11-19T12:18:13,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/381d13661c6d48afaa1f2bf29e554161 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/381d13661c6d48afaa1f2bf29e554161 2024-11-19T12:18:13,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/8b38b008ccf744c08ef2a545024bd0e9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/8b38b008ccf744c08ef2a545024bd0e9 2024-11-19T12:18:13,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b613eba33c0b469ab4c68e3ac3c237d9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b613eba33c0b469ab4c68e3ac3c237d9 2024-11-19T12:18:13,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/75cdb9f3f0e349b6b541f482e9433b70 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/75cdb9f3f0e349b6b541f482e9433b70 2024-11-19T12:18:13,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/0f7246f61fee4bb283b30488cca79295 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/0f7246f61fee4bb283b30488cca79295 2024-11-19T12:18:13,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/3b784d65742d42c7acac4e72d9a06439 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/3b784d65742d42c7acac4e72d9a06439 2024-11-19T12:18:13,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/873eae9d835b4a80a292a247cfe22c87 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/873eae9d835b4a80a292a247cfe22c87 2024-11-19T12:18:13,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/27b4fea7debc41559d69293e8b0f0e01 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/27b4fea7debc41559d69293e8b0f0e01 2024-11-19T12:18:13,650 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b9edb644a3e345ceaa96f628a617b4ea to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b9edb644a3e345ceaa96f628a617b4ea 2024-11-19T12:18:13,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/c39b3ffe81b946ed935cb47fc164681d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/c39b3ffe81b946ed935cb47fc164681d 2024-11-19T12:18:13,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/86f656ea955d43e486a4a39223a945f6 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/86f656ea955d43e486a4a39223a945f6 2024-11-19T12:18:13,668 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/a7673bbd886a4dcfb5332818f70a8c8e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/a7673bbd886a4dcfb5332818f70a8c8e 2024-11-19T12:18:13,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/5e6d5a9d39a648e1a8e00eece4b00c78 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/5e6d5a9d39a648e1a8e00eece4b00c78 2024-11-19T12:18:13,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/fde937863610463f8180f1938181d1e5 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/fde937863610463f8180f1938181d1e5 2024-11-19T12:18:13,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/d8f96c7825e64689a1c83b6defe3b8a2 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/d8f96c7825e64689a1c83b6defe3b8a2 2024-11-19T12:18:13,692 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/3402f6ff70ac4240bec0b7a9827837e5 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/3402f6ff70ac4240bec0b7a9827837e5 2024-11-19T12:18:13,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/30491dae26ce4bceb7b6c36ea614ce13 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/30491dae26ce4bceb7b6c36ea614ce13 2024-11-19T12:18:13,703 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/a532dc47a48e4995a6e22d7f5057485d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/a532dc47a48e4995a6e22d7f5057485d 2024-11-19T12:18:13,704 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/84a0883cae4c4648a531c67fc23b55c7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/84a0883cae4c4648a531c67fc23b55c7 2024-11-19T12:18:13,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6e8dd928f2b8486cba39eda5710763f8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6e8dd928f2b8486cba39eda5710763f8 2024-11-19T12:18:13,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f55055bfa54a41068da6c794301c2ce2 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f55055bfa54a41068da6c794301c2ce2 2024-11-19T12:18:13,719 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/93783d132885471480c890b6f0bb0ff1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/93783d132885471480c890b6f0bb0ff1 2024-11-19T12:18:13,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6ed2fc24565c431188653f21e59766af to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6ed2fc24565c431188653f21e59766af 2024-11-19T12:18:13,722 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/38f2b63b3e2f4b60a65d26a8efa30c74 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/38f2b63b3e2f4b60a65d26a8efa30c74 2024-11-19T12:18:13,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f05c7e8b68b744eeba259e81c202fbd0 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f05c7e8b68b744eeba259e81c202fbd0 2024-11-19T12:18:13,737 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/cc4994b5b9f34e9e8843455fb67762d9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/cc4994b5b9f34e9e8843455fb67762d9 2024-11-19T12:18:13,741 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/2a83737ca89047278d480f20591ee6b1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/2a83737ca89047278d480f20591ee6b1 2024-11-19T12:18:13,746 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/82565a22e1e547c28fd4b8a59f8e2440 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/82565a22e1e547c28fd4b8a59f8e2440 2024-11-19T12:18:13,747 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/42f29ef5fd56468281876c7a2cd7586a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/42f29ef5fd56468281876c7a2cd7586a 2024-11-19T12:18:13,749 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b2063eb97fbd4225b2922fb018375559 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b2063eb97fbd4225b2922fb018375559 2024-11-19T12:18:13,750 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f0c8f66f79eb4c1c85cccc08d67efe25 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f0c8f66f79eb4c1c85cccc08d67efe25 2024-11-19T12:18:13,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b74a74649a4f4ff2a8b803df48248892 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b74a74649a4f4ff2a8b803df48248892 2024-11-19T12:18:13,758 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/369e405d0c874bcb87489ecf86a972cd to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/369e405d0c874bcb87489ecf86a972cd 2024-11-19T12:18:13,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/54d11889ff714ec3a6a94f1f453c416a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/54d11889ff714ec3a6a94f1f453c416a 2024-11-19T12:18:13,764 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b18d3853509f47d1b35bab087be6a2e2 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/b18d3853509f47d1b35bab087be6a2e2 2024-11-19T12:18:13,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f36e7944edd347cfabb01f7dc6b47dfc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/f36e7944edd347cfabb01f7dc6b47dfc 2024-11-19T12:18:13,771 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/recovered.edits/613.seqid, newMaxSeqId=613, maxSeqId=1 2024-11-19T12:18:13,775 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc. 2024-11-19T12:18:13,775 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1635): Region close journal for 1bd7f6746cebf2fb7e39737ab25d16cc: 2024-11-19T12:18:13,777 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(170): Closed 1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:13,778 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=1bd7f6746cebf2fb7e39737ab25d16cc, regionState=CLOSED 2024-11-19T12:18:13,781 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-19T12:18:13,781 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseRegionProcedure 1bd7f6746cebf2fb7e39737ab25d16cc, server=af314c41f984,36047,1732018661455 in 1.7230 sec 2024-11-19T12:18:13,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-11-19T12:18:13,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1bd7f6746cebf2fb7e39737ab25d16cc, UNASSIGN in 1.7330 sec 2024-11-19T12:18:13,794 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-19T12:18:13,794 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.7410 sec 2024-11-19T12:18:13,796 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018693795"}]},"ts":"1732018693795"} 2024-11-19T12:18:13,797 INFO 
[PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-19T12:18:13,799 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-19T12:18:13,801 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.7840 sec 2024-11-19T12:18:14,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-19T12:18:14,132 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-19T12:18:14,135 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-19T12:18:14,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:14,142 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:14,144 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:14,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-19T12:18:14,164 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:14,182 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/recovered.edits] 2024-11-19T12:18:14,192 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/18bda39fa450409f9dfb78e297bd96ac to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/18bda39fa450409f9dfb78e297bd96ac 2024-11-19T12:18:14,194 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/77a230c986524e13aa4e09ae55727db4 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/77a230c986524e13aa4e09ae55727db4 2024-11-19T12:18:14,198 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/96a39ec02ce34535980fe169dcf7e8d6 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/A/96a39ec02ce34535980fe169dcf7e8d6 2024-11-19T12:18:14,209 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1c205ec374fd487d991ac1c8e8378048 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/1c205ec374fd487d991ac1c8e8378048 2024-11-19T12:18:14,219 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6dafb93b871b4884917e5f7a4bafff83 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/6dafb93b871b4884917e5f7a4bafff83 2024-11-19T12:18:14,221 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/af94b2f02bb34a09af0f0642bf6c867b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/B/af94b2f02bb34a09af0f0642bf6c867b 2024-11-19T12:18:14,227 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/61a7beb3b9444f0d8c390d26ec4152b2 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/61a7beb3b9444f0d8c390d26ec4152b2 2024-11-19T12:18:14,237 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6a5bcbf5178543ebaefdd4560b9a192b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/6a5bcbf5178543ebaefdd4560b9a192b 2024-11-19T12:18:14,243 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/af5d336efcb3481d85970916645c3bc2 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/C/af5d336efcb3481d85970916645c3bc2 2024-11-19T12:18:14,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-19T12:18:14,262 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/recovered.edits/613.seqid to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc/recovered.edits/613.seqid 2024-11-19T12:18:14,264 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/1bd7f6746cebf2fb7e39737ab25d16cc 2024-11-19T12:18:14,265 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-19T12:18:14,289 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:14,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-19T12:18:14,298 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-19T12:18:14,339 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-19T12:18:14,342 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:14,342 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-19T12:18:14,342 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732018694342"}]},"ts":"9223372036854775807"} 2024-11-19T12:18:14,351 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-19T12:18:14,351 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 1bd7f6746cebf2fb7e39737ab25d16cc, NAME => 'TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc.', STARTKEY => '', ENDKEY => ''}] 2024-11-19T12:18:14,351 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-19T12:18:14,351 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732018694351"}]},"ts":"9223372036854775807"} 2024-11-19T12:18:14,362 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-19T12:18:14,368 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:14,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 232 msec 2024-11-19T12:18:14,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-19T12:18:14,450 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-19T12:18:14,470 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=239 (was 219) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-633302489_22 at /127.0.0.1:54706 [Waiting for operation #286] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/af314c41f984:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5525568c-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: hconnection-0x5525568c-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;af314c41f984:36047-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5525568c-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/af314c41f984:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_222723292_22 at /127.0.0.1:54856 [Waiting for operation #232] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5525568c-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=458 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=498 (was 148) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2028 (was 2615) 2024-11-19T12:18:14,484 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=239, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=498, ProcessCount=11, AvailableMemoryMB=2028 2024-11-19T12:18:14,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-19T12:18:14,487 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:18:14,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:14,490 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:18:14,490 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:14,490 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 37 2024-11-19T12:18:14,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-19T12:18:14,493 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:18:14,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741968_1144 (size=960) 2024-11-19T12:18:14,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-19T12:18:14,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-19T12:18:14,921 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22 2024-11-19T12:18:14,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741969_1145 (size=53) 2024-11-19T12:18:15,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-19T12:18:15,348 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:18:15,349 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 9f6f39e9e25e92ed07f6efcc92916a02, disabling compactions & flushes 2024-11-19T12:18:15,349 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:15,349 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:15,349 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. after waiting 0 ms 2024-11-19T12:18:15,349 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:15,349 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:15,349 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:15,353 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:18:15,353 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732018695353"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732018695353"}]},"ts":"1732018695353"} 2024-11-19T12:18:15,356 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-19T12:18:15,360 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:18:15,360 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018695360"}]},"ts":"1732018695360"} 2024-11-19T12:18:15,368 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-19T12:18:15,372 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9f6f39e9e25e92ed07f6efcc92916a02, ASSIGN}] 2024-11-19T12:18:15,374 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9f6f39e9e25e92ed07f6efcc92916a02, ASSIGN 2024-11-19T12:18:15,375 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9f6f39e9e25e92ed07f6efcc92916a02, ASSIGN; state=OFFLINE, location=af314c41f984,36047,1732018661455; forceNewPlan=false, retain=false 2024-11-19T12:18:15,529 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=9f6f39e9e25e92ed07f6efcc92916a02, regionState=OPENING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:18:15,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; OpenRegionProcedure 9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:18:15,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-19T12:18:15,684 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:15,689 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:15,689 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7285): Opening region: {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:18:15,689 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:15,689 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:18:15,689 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7327): checking encryption for 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:15,690 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7330): checking classloading for 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:15,693 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:15,695 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:18:15,696 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9f6f39e9e25e92ed07f6efcc92916a02 columnFamilyName A 2024-11-19T12:18:15,696 DEBUG [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:15,699 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(327): Store=9f6f39e9e25e92ed07f6efcc92916a02/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:18:15,699 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:15,700 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:18:15,701 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9f6f39e9e25e92ed07f6efcc92916a02 columnFamilyName B 2024-11-19T12:18:15,701 DEBUG [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:15,702 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(327): Store=9f6f39e9e25e92ed07f6efcc92916a02/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:18:15,703 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:15,704 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:18:15,704 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9f6f39e9e25e92ed07f6efcc92916a02 columnFamilyName C 2024-11-19T12:18:15,704 DEBUG [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:15,705 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(327): Store=9f6f39e9e25e92ed07f6efcc92916a02/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:18:15,705 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:15,706 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:15,706 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:15,712 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:18:15,714 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1085): writing seq id for 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:15,716 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:18:15,717 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1102): Opened 9f6f39e9e25e92ed07f6efcc92916a02; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61050443, jitterRate=-0.09027750790119171}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:18:15,718 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1001): Region open journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:15,718 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., pid=39, masterSystemTime=1732018695684 2024-11-19T12:18:15,720 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:15,720 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:15,721 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=9f6f39e9e25e92ed07f6efcc92916a02, regionState=OPEN, openSeqNum=2, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:18:15,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-19T12:18:15,724 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; OpenRegionProcedure 9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 in 190 msec 2024-11-19T12:18:15,730 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-11-19T12:18:15,730 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9f6f39e9e25e92ed07f6efcc92916a02, ASSIGN in 352 msec 2024-11-19T12:18:15,731 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:18:15,731 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018695731"}]},"ts":"1732018695731"} 2024-11-19T12:18:15,733 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-19T12:18:15,736 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:18:15,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2490 sec 2024-11-19T12:18:16,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-19T12:18:16,602 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 37 completed 2024-11-19T12:18:16,604 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7f48093f to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3059cf43 2024-11-19T12:18:16,607 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ecebc90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:16,609 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:16,611 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35612, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:16,613 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:18:16,614 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33176, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:18:16,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-19T12:18:16,619 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:18:16,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:16,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741970_1146 (size=996) 2024-11-19T12:18:17,037 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-19T12:18:17,037 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-19T12:18:17,040 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-19T12:18:17,063 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9f6f39e9e25e92ed07f6efcc92916a02, REOPEN/MOVE}] 2024-11-19T12:18:17,063 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9f6f39e9e25e92ed07f6efcc92916a02, REOPEN/MOVE 2024-11-19T12:18:17,064 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=9f6f39e9e25e92ed07f6efcc92916a02, regionState=CLOSING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,065 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-19T12:18:17,065 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; CloseRegionProcedure 9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:18:17,217 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:17,217 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(124): Close 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,218 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-19T12:18:17,218 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1681): Closing 9f6f39e9e25e92ed07f6efcc92916a02, disabling compactions & flushes 2024-11-19T12:18:17,218 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:17,218 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:17,218 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. after waiting 0 ms 2024-11-19T12:18:17,218 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
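Editor's note: the ModifyTableProcedure (pid=40) logged above, which adds IS_MOB => 'true' and MOB_THRESHOLD => '4' to column family A and drives the REOPEN/MOVE of region 9f6f39e9e25e92ed07f6efcc92916a02, corresponds to an Admin-side table modification. The following is only an illustrative sketch of how such a change is typically issued with the HBase 2.x client API; it is not the test's actual code, and the class name and connection setup are assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          TableDescriptor current = admin.getDescriptor(table);
          // Rebuild family 'A' with MOB enabled and a 4-byte threshold, mirroring
          // the IS_MOB => 'true', MOB_THRESHOLD => '4' attributes shown in the log.
          TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                  .setMobEnabled(true)
                  .setMobThreshold(4L)
                  .build())
              .build();
          // The master stores this as a ModifyTableProcedure, rewrites .tableinfo,
          // and reopens the table's regions (the CLOSE/OPEN sequence in the log).
          admin.modifyTable(modified);
        }
      }
    }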
2024-11-19T12:18:17,222 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-19T12:18:17,222 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:17,222 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1635): Region close journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:17,222 WARN [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionServer(3786): Not adding moved region record: 9f6f39e9e25e92ed07f6efcc92916a02 to self. 2024-11-19T12:18:17,224 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(170): Closed 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,224 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=9f6f39e9e25e92ed07f6efcc92916a02, regionState=CLOSED 2024-11-19T12:18:17,226 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-19T12:18:17,226 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; CloseRegionProcedure 9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 in 160 msec 2024-11-19T12:18:17,227 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9f6f39e9e25e92ed07f6efcc92916a02, REOPEN/MOVE; state=CLOSED, location=af314c41f984,36047,1732018661455; forceNewPlan=false, retain=true 2024-11-19T12:18:17,377 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=9f6f39e9e25e92ed07f6efcc92916a02, regionState=OPENING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,379 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure 9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:18:17,530 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:17,533 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
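Editor's note: the TableDescriptorChecker warning earlier in the log flags a memstore flush size of 131072 bytes (128 KB), set either on the table descriptor or via "hbase.hregion.memstore.flush.size". Assuming the default hbase.hregion.memstore.block.multiplier of 4 (the test's configuration is not shown), the per-region blocking size works out to 131072 * 4 = 524288 bytes = 512 KB, which matches the "Over memstore limit=512.0 K" reported by the RegionTooBusyException entries further down once writes outpace flushing. A hypothetical configuration sketch of that arithmetic, using the site-configuration route for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush each region's memstore at 128 KB (the value the checker warns about).
        conf.setLong("hbase.hregion.memstore.flush.size", 131072L);
        // With the default multiplier, writes are rejected with RegionTooBusyException
        // once the memstore reaches flush.size * multiplier = 131072 * 4 = 524288 bytes.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore size: " + blockingLimit + " bytes"); // 524288
      }
    }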
2024-11-19T12:18:17,534 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:18:17,534 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,534 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:18:17,534 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,534 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,537 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,537 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:18:17,543 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9f6f39e9e25e92ed07f6efcc92916a02 columnFamilyName A 2024-11-19T12:18:17,545 DEBUG [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:17,545 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(327): Store=9f6f39e9e25e92ed07f6efcc92916a02/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:18:17,546 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,546 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:18:17,546 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9f6f39e9e25e92ed07f6efcc92916a02 columnFamilyName B 2024-11-19T12:18:17,546 DEBUG [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:17,547 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(327): Store=9f6f39e9e25e92ed07f6efcc92916a02/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:18:17,547 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,547 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:18:17,548 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9f6f39e9e25e92ed07f6efcc92916a02 columnFamilyName C 2024-11-19T12:18:17,548 DEBUG [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:17,548 INFO [StoreOpener-9f6f39e9e25e92ed07f6efcc92916a02-1 {}] regionserver.HStore(327): Store=9f6f39e9e25e92ed07f6efcc92916a02/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:18:17,548 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:17,549 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,550 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,551 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:18:17,552 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,553 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 9f6f39e9e25e92ed07f6efcc92916a02; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61885407, jitterRate=-0.0778355747461319}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:18:17,554 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:17,554 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., pid=44, masterSystemTime=1732018697530 2024-11-19T12:18:17,556 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:17,556 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
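Editor's note: each ReadOnlyZKClient "Connect 0x... to 127.0.0.1:64186" entry that follows corresponds to a separate client Connection being opened against the mini cluster's ZooKeeper. A minimal sketch of what one such connection amounts to with the public client API; the quorum address and port are taken from the log, everything else is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class OpenClientConnection {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The test cluster's ZooKeeper, as reported by ReadOnlyZKClient in the log.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 64186);
        // Each createConnection() call sets up its own ZK watcher and RPC client,
        // which is why the log shows one "Connect 0x..." line per test worker.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          System.out.println("Connected; table available: " + table.getName());
        }
      }
    }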
2024-11-19T12:18:17,556 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=9f6f39e9e25e92ed07f6efcc92916a02, regionState=OPEN, openSeqNum=5, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,559 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-11-19T12:18:17,559 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure 9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 in 179 msec 2024-11-19T12:18:17,560 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-19T12:18:17,560 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9f6f39e9e25e92ed07f6efcc92916a02, REOPEN/MOVE in 496 msec 2024-11-19T12:18:17,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-19T12:18:17,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 522 msec 2024-11-19T12:18:17,566 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 943 msec 2024-11-19T12:18:17,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-19T12:18:17,574 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b8114b4 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c6ce985 2024-11-19T12:18:17,579 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ad21927, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:17,581 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6b5c4058 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e9c3820 2024-11-19T12:18:17,584 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e48ff0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:17,585 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x77f4d875 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6cadde55 2024-11-19T12:18:17,588 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ab1874e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:17,590 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d89b50a to 
127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@489535ee 2024-11-19T12:18:17,592 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2362c8ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:17,593 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0de2fcf6 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36db9f87 2024-11-19T12:18:17,596 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@349a6fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:17,597 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c368568 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3286a246 2024-11-19T12:18:17,599 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c317ae0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:17,600 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1aed43b4 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dfe463d 2024-11-19T12:18:17,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a0fc918, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:17,604 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6cab9ba4 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@30be7a2 2024-11-19T12:18:17,606 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77b8b9d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:17,607 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x074eb796 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5ba4762a 2024-11-19T12:18:17,609 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@696032a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:17,613 DEBUG 
[hconnection-0x5dd8e4db-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:17,614 DEBUG [hconnection-0x2bcf440e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:17,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:17,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-19T12:18:17,616 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35628, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:17,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-19T12:18:17,616 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:17,616 DEBUG [hconnection-0x357e1a9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:17,616 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:17,616 DEBUG [hconnection-0x709fbb51-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:17,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:17,617 DEBUG [hconnection-0x5fd11fbd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:17,618 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35632, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:17,618 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35646, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:17,618 DEBUG [hconnection-0x1691549f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:17,618 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35634, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:17,619 DEBUG [hconnection-0x3731620b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:17,620 DEBUG [hconnection-0x32a248f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
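Editor's note: the FlushTableProcedure (pid=45) stored above and the burst of ClientService Mutate calls below come from the test's writer threads all targeting the single region. The sketch below is a rough approximation of that client-side pattern; the row and qualifier names are borrowed from the "test_row_0/A:col10" key that appears in a later flush entry, while the loop count and payload size are assumptions. RegionTooBusyException is retryable, so the stock client backs off and re-sends rejected Mutates, consistent with the repeated rejections on the same connections that follow.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          byte[] value = new byte[1024];  // assumed payload size
          for (int i = 0; i < 100; i++) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Write the same qualifier into all three families, matching the
            // A/B/C column families the flush entries report.
            for (String family : new String[] {"A", "B", "C"}) {
              put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
            }
            // If the region's memstore is over its blocking size, the server answers
            // with RegionTooBusyException and the client retries after a backoff.
            table.put(put);
          }
          // Ask the master to flush the table, i.e. the FlushTableProcedure seen above.
          admin.flush(name);
        }
      }
    }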
2024-11-19T12:18:17,620 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35656, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:17,620 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35676, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:17,621 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35690, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:17,622 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35664, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:17,628 DEBUG [hconnection-0x68695b3e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:17,629 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35696, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:17,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:17,633 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:18:17,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:17,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:17,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:17,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:17,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:17,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:17,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018757672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018757673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018757675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018757675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018757676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111929e3708ad60b49f38f46100a8723c355_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018697629/Put/seqid=0 2024-11-19T12:18:17,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741971_1147 (size=12154) 2024-11-19T12:18:17,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-19T12:18:17,772 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:17,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-19T12:18:17,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:17,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
as already flushing 2024-11-19T12:18:17,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:17,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:17,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:17,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018757779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018757779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018757779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018757779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018757780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-19T12:18:17,926 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:17,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-19T12:18:17,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:17,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:17,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:17,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:17,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:17,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:17,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018757981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018757982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018757983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018757983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:17,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:17,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018757983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,053 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:18:18,055 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:18:18,080 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:18,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-19T12:18:18,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:18,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:18,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
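The repeated RegionTooBusyException rejections above are the region server refusing Mutate calls while the region's memstore sits over its blocking limit (512.0 K here) and a flush is still pending. Below is a minimal client-side sketch of how a writer might back off and retry on that rejection. The table, row, family, and qualifier names are taken from the log; the retry loop, backoff values, and class name are illustrative assumptions rather than the test's actual code, and in practice the stock HBase client performs this retrying internally and may surface the failure wrapped in a retries-exhausted exception.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {            // hypothetical class name, for illustration only
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                // assumed starting backoff, doubled per attempt
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);              // rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException busy) {
                    // Same condition as the "Over memstore limit=512.0 K" entries in the log:
                    // wait for the region server to flush, then try again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}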
2024-11-19T12:18:18,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:18,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:18,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:18,109 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:18,113 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111929e3708ad60b49f38f46100a8723c355_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111929e3708ad60b49f38f46100a8723c355_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:18,115 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/d4c7d1f568f74f0083f532a0f112c759, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:18,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/d4c7d1f568f74f0083f532a0f112c759 is 175, key is test_row_0/A:col10/1732018697629/Put/seqid=0 2024-11-19T12:18:18,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741972_1148 (size=30955) 2024-11-19T12:18:18,147 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/d4c7d1f568f74f0083f532a0f112c759 2024-11-19T12:18:18,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/25721a8f6bec48309b6c89e5cde104e9 is 50, key is test_row_0/B:col10/1732018697629/Put/seqid=0 2024-11-19T12:18:18,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741973_1149 (size=12001) 2024-11-19T12:18:18,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-19T12:18:18,234 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:18,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-19T12:18:18,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:18,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:18,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:18,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
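The 512.0 K figure reported by HRegion.checkResources is the region's blocking memstore size, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The excerpt does not show the values this run uses, so the sketch below assumes a 128 KB flush size with the default multiplier of 4, which would reproduce the same limit; the class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfigSketch {        // hypothetical, shows where the 512 K limit could come from
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values: 128 KB flush size * block multiplier 4 = the 512 K blocking limit in the log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // Puts against a region whose memstore exceeds flush.size * multiplier are rejected with
        // RegionTooBusyException until the pending flush completes.
        long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore limit = " + (limit / 1024) + " K");
    }
}

With the production defaults (128 MB flush size, multiplier 4) this path is rarely hit; shrinking the flush size as assumed above is what makes the blocking behaviour easy to exercise in a test.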
2024-11-19T12:18:18,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:18,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:18,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018758288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018758291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018758293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018758294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018758295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,387 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:18,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-19T12:18:18,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:18,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:18,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:18,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
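Column family A in this region is flushed through HMobStore and DefaultMobStoreFlusher (the mobdir/.tmp renames earlier in the log), while families B and C go through the plain DefaultStoreFlusher, so A is evidently MOB-enabled. The sketch below shows how such a table could be declared with the HBase 2.x client API; the MOB threshold value and the class name are assumptions, since the actual table descriptor is not shown in this excerpt.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {                   // hypothetical helper, not the test's setup code
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
            // Family A is MOB-enabled; the 10-byte threshold is a guess chosen so that
            // ordinary cells are written out as MOB files, matching the mobdir flushes in the log.
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(10L)
                .build());
            // Families B and C flush through the default store flusher.
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
            admin.createTable(table.build());
        }
    }
}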
2024-11-19T12:18:18,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:18,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:18,541 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:18,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-19T12:18:18,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:18,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:18,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:18,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:18,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:18,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
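The pid=46 flush keeps failing with "Unable to complete flush ... as already flushing" and is re-dispatched by the master until the MemStoreFlusher run below finishes; only then does the procedure get to perform its own flush. That procedure appears to originate from an admin flush request (the recurring "Checking to see if procedure is done pid=45" polling matches a caller waiting on one, with pid=46 as its per-region child), though the excerpt does not show the caller. A minimal sketch of issuing such a request follows; the class name is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {               // hypothetical, shows the admin-side trigger only
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush the table's regions; the master drives this through a
            // flush procedure like the pid=45/pid=46 pair seen in the log, retrying the region
            // callable while the region server reports it is already flushing.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}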
2024-11-19T12:18:18,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/25721a8f6bec48309b6c89e5cde104e9 2024-11-19T12:18:18,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/98ccab8ed63b4596ab31578bc505d2eb is 50, key is test_row_0/C:col10/1732018697629/Put/seqid=0 2024-11-19T12:18:18,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741974_1150 (size=12001) 2024-11-19T12:18:18,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/98ccab8ed63b4596ab31578bc505d2eb 2024-11-19T12:18:18,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/d4c7d1f568f74f0083f532a0f112c759 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/d4c7d1f568f74f0083f532a0f112c759 2024-11-19T12:18:18,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/d4c7d1f568f74f0083f532a0f112c759, entries=150, sequenceid=16, filesize=30.2 K 2024-11-19T12:18:18,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/25721a8f6bec48309b6c89e5cde104e9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/25721a8f6bec48309b6c89e5cde104e9 2024-11-19T12:18:18,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/25721a8f6bec48309b6c89e5cde104e9, entries=150, sequenceid=16, filesize=11.7 K 2024-11-19T12:18:18,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/98ccab8ed63b4596ab31578bc505d2eb as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/98ccab8ed63b4596ab31578bc505d2eb 2024-11-19T12:18:18,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/98ccab8ed63b4596ab31578bc505d2eb, entries=150, sequenceid=16, filesize=11.7 K 2024-11-19T12:18:18,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 9f6f39e9e25e92ed07f6efcc92916a02 in 1053ms, sequenceid=16, compaction requested=false 2024-11-19T12:18:18,686 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-19T12:18:18,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:18,695 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:18,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-19T12:18:18,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:18,696 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-19T12:18:18,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:18,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:18,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:18,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:18,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:18,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:18,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111949824db0593042aa8413e93e8d390dcd_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018697674/Put/seqid=0 2024-11-19T12:18:18,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to 
blk_1073741975_1151 (size=12154) 2024-11-19T12:18:18,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:18,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-19T12:18:18,730 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111949824db0593042aa8413e93e8d390dcd_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111949824db0593042aa8413e93e8d390dcd_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:18,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/e8cfbc40ef984656824b6baa41cfe3ee, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:18,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/e8cfbc40ef984656824b6baa41cfe3ee is 175, key is test_row_0/A:col10/1732018697674/Put/seqid=0 2024-11-19T12:18:18,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741976_1152 (size=30955) 2024-11-19T12:18:18,746 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/e8cfbc40ef984656824b6baa41cfe3ee 2024-11-19T12:18:18,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/1b91e6861c5f4402846962898305ffab is 50, key is test_row_0/B:col10/1732018697674/Put/seqid=0 2024-11-19T12:18:18,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741977_1153 (size=12001) 2024-11-19T12:18:18,777 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), 
to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/1b91e6861c5f4402846962898305ffab 2024-11-19T12:18:18,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:18,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:18,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/4f097dacc6de42139c832c276d842e66 is 50, key is test_row_0/C:col10/1732018697674/Put/seqid=0 2024-11-19T12:18:18,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741978_1154 (size=12001) 2024-11-19T12:18:18,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018758810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,822 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/4f097dacc6de42139c832c276d842e66 2024-11-19T12:18:18,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/e8cfbc40ef984656824b6baa41cfe3ee as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/e8cfbc40ef984656824b6baa41cfe3ee 2024-11-19T12:18:18,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018758813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,833 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/e8cfbc40ef984656824b6baa41cfe3ee, entries=150, sequenceid=40, filesize=30.2 K 2024-11-19T12:18:18,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018758817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018758818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018758820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/1b91e6861c5f4402846962898305ffab as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/1b91e6861c5f4402846962898305ffab 2024-11-19T12:18:18,843 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/1b91e6861c5f4402846962898305ffab, entries=150, sequenceid=40, filesize=11.7 K 2024-11-19T12:18:18,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/4f097dacc6de42139c832c276d842e66 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/4f097dacc6de42139c832c276d842e66 2024-11-19T12:18:18,850 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/4f097dacc6de42139c832c276d842e66, entries=150, sequenceid=40, filesize=11.7 K 2024-11-19T12:18:18,851 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 9f6f39e9e25e92ed07f6efcc92916a02 in 155ms, sequenceid=40, compaction requested=false 2024-11-19T12:18:18,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:18,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing 
region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:18,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-19T12:18:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-19T12:18:18,854 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-19T12:18:18,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2360 sec 2024-11-19T12:18:18,858 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.2410 sec 2024-11-19T12:18:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:18,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-19T12:18:18,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:18,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:18,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:18,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:18,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:18,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:18,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411195eb19c8c7d9940929f514feb949ea238_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018698926/Put/seqid=0 2024-11-19T12:18:18,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741979_1155 (size=14594) 2024-11-19T12:18:18,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018758990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018758990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018758989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018758990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018758992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018759097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018759098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018759098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018759098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018759098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018759299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018759301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018759304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018759304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018759304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,319 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-19T12:18:19,352 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:19,357 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411195eb19c8c7d9940929f514feb949ea238_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411195eb19c8c7d9940929f514feb949ea238_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:19,359 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/803c5e8c6cf74142bd68d820e77c6368, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:19,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/803c5e8c6cf74142bd68d820e77c6368 is 175, key is test_row_0/A:col10/1732018698926/Put/seqid=0 2024-11-19T12:18:19,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added 
to blk_1073741980_1156 (size=39549) 2024-11-19T12:18:19,381 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/803c5e8c6cf74142bd68d820e77c6368 2024-11-19T12:18:19,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/52bec6e78e7e4ab7a3e9cbbe442b1c69 is 50, key is test_row_0/B:col10/1732018698926/Put/seqid=0 2024-11-19T12:18:19,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741981_1157 (size=12001) 2024-11-19T12:18:19,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018759604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018759605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018759607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018759608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:19,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018759609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:19,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-19T12:18:19,723 INFO [Thread-717 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-19T12:18:19,725 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:19,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-19T12:18:19,727 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:19,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-19T12:18:19,728 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:19,728 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:19,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/52bec6e78e7e4ab7a3e9cbbe442b1c69 2024-11-19T12:18:19,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/09b04491f8614c16bf14a95b4f160336 is 50, key is test_row_0/C:col10/1732018698926/Put/seqid=0 2024-11-19T12:18:19,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-19T12:18:19,841 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741982_1158 (size=12001) 2024-11-19T12:18:19,843 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/09b04491f8614c16bf14a95b4f160336 2024-11-19T12:18:19,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/803c5e8c6cf74142bd68d820e77c6368 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/803c5e8c6cf74142bd68d820e77c6368 2024-11-19T12:18:19,862 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/803c5e8c6cf74142bd68d820e77c6368, entries=200, sequenceid=53, filesize=38.6 K 2024-11-19T12:18:19,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/52bec6e78e7e4ab7a3e9cbbe442b1c69 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/52bec6e78e7e4ab7a3e9cbbe442b1c69 2024-11-19T12:18:19,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/52bec6e78e7e4ab7a3e9cbbe442b1c69, entries=150, sequenceid=53, filesize=11.7 K 2024-11-19T12:18:19,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/09b04491f8614c16bf14a95b4f160336 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/09b04491f8614c16bf14a95b4f160336 2024-11-19T12:18:19,880 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:19,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-19T12:18:19,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:19,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
as already flushing 2024-11-19T12:18:19,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:19,881 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:19,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:19,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:19,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/09b04491f8614c16bf14a95b4f160336, entries=150, sequenceid=53, filesize=11.7 K 2024-11-19T12:18:19,886 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9f6f39e9e25e92ed07f6efcc92916a02 in 957ms, sequenceid=53, compaction requested=true 2024-11-19T12:18:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:19,886 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:19,886 DEBUG 
[RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:19,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:19,890 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:19,890 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:19,890 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/B is initiating minor compaction (all files) 2024-11-19T12:18:19,890 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/A is initiating minor compaction (all files) 2024-11-19T12:18:19,890 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/B in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:19,890 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/A in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:19,891 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/25721a8f6bec48309b6c89e5cde104e9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/1b91e6861c5f4402846962898305ffab, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/52bec6e78e7e4ab7a3e9cbbe442b1c69] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=35.2 K 2024-11-19T12:18:19,891 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/d4c7d1f568f74f0083f532a0f112c759, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/e8cfbc40ef984656824b6baa41cfe3ee, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/803c5e8c6cf74142bd68d820e77c6368] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=99.1 K 2024-11-19T12:18:19,891 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:19,891 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/d4c7d1f568f74f0083f532a0f112c759, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/e8cfbc40ef984656824b6baa41cfe3ee, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/803c5e8c6cf74142bd68d820e77c6368] 2024-11-19T12:18:19,891 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 25721a8f6bec48309b6c89e5cde104e9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732018697629 2024-11-19T12:18:19,891 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4c7d1f568f74f0083f532a0f112c759, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732018697629 2024-11-19T12:18:19,892 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b91e6861c5f4402846962898305ffab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732018697656 2024-11-19T12:18:19,892 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8cfbc40ef984656824b6baa41cfe3ee, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732018697656 2024-11-19T12:18:19,892 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 52bec6e78e7e4ab7a3e9cbbe442b1c69, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018698810 2024-11-19T12:18:19,892 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 803c5e8c6cf74142bd68d820e77c6368, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018698810 2024-11-19T12:18:19,904 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#B#compaction#138 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:19,904 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/d312a72c1c3e4daa847e23e7d837cb0d is 50, key is test_row_0/B:col10/1732018698926/Put/seqid=0 2024-11-19T12:18:19,909 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:19,924 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111924d2ad1eb05f4d06a4da60f18156a7dd_9f6f39e9e25e92ed07f6efcc92916a02 store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:19,931 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111924d2ad1eb05f4d06a4da60f18156a7dd_9f6f39e9e25e92ed07f6efcc92916a02, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:19,931 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111924d2ad1eb05f4d06a4da60f18156a7dd_9f6f39e9e25e92ed07f6efcc92916a02 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:19,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741983_1159 (size=12104) 2024-11-19T12:18:19,945 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/d312a72c1c3e4daa847e23e7d837cb0d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d312a72c1c3e4daa847e23e7d837cb0d 2024-11-19T12:18:19,951 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/B of 9f6f39e9e25e92ed07f6efcc92916a02 into d312a72c1c3e4daa847e23e7d837cb0d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:19,951 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:19,951 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/B, priority=13, startTime=1732018699886; duration=0sec 2024-11-19T12:18:19,951 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:19,951 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:B 2024-11-19T12:18:19,951 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:19,954 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:19,954 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/C is initiating minor compaction (all files) 2024-11-19T12:18:19,954 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/C in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:19,954 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/98ccab8ed63b4596ab31578bc505d2eb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/4f097dacc6de42139c832c276d842e66, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/09b04491f8614c16bf14a95b4f160336] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=35.2 K 2024-11-19T12:18:19,955 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 98ccab8ed63b4596ab31578bc505d2eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732018697629 2024-11-19T12:18:19,955 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f097dacc6de42139c832c276d842e66, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732018697656 2024-11-19T12:18:19,956 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 09b04491f8614c16bf14a95b4f160336, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018698810 2024-11-19T12:18:19,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is 
added to blk_1073741984_1160 (size=4469) 2024-11-19T12:18:19,970 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#C#compaction#140 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:19,971 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/eea27a4b1fef46ad805105de5dcfad2f is 50, key is test_row_0/C:col10/1732018698926/Put/seqid=0 2024-11-19T12:18:19,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741985_1161 (size=12104) 2024-11-19T12:18:20,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-19T12:18:20,033 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:20,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-19T12:18:20,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:20,034 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-19T12:18:20,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:20,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:20,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:20,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:20,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:20,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:20,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119d396ddd11685461abec68698722d9e61_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018698990/Put/seqid=0 2024-11-19T12:18:20,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741986_1162 (size=12154) 2024-11-19T12:18:20,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:20,080 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119d396ddd11685461abec68698722d9e61_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d396ddd11685461abec68698722d9e61_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:20,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/cd31b51f19b9494dad88c9b1fdc3ac1b, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:20,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/cd31b51f19b9494dad88c9b1fdc3ac1b is 175, key is test_row_0/A:col10/1732018698990/Put/seqid=0 2024-11-19T12:18:20,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741987_1163 (size=30955) 2024-11-19T12:18:20,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:20,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:20,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018760122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018760124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018760125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018760125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018760127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018760227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018760228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018760228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018760229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018760231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-19T12:18:20,368 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#A#compaction#139 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:20,370 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/1721ad209483409c874f01180c631e2d is 175, key is test_row_0/A:col10/1732018698926/Put/seqid=0 2024-11-19T12:18:20,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741988_1164 (size=31058) 2024-11-19T12:18:20,383 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/1721ad209483409c874f01180c631e2d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1721ad209483409c874f01180c631e2d 2024-11-19T12:18:20,390 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/A of 9f6f39e9e25e92ed07f6efcc92916a02 into 1721ad209483409c874f01180c631e2d(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:20,391 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:20,391 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/A, priority=13, startTime=1732018699886; duration=0sec 2024-11-19T12:18:20,391 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:20,391 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:A 2024-11-19T12:18:20,396 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/eea27a4b1fef46ad805105de5dcfad2f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/eea27a4b1fef46ad805105de5dcfad2f 2024-11-19T12:18:20,402 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/C of 9f6f39e9e25e92ed07f6efcc92916a02 into eea27a4b1fef46ad805105de5dcfad2f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:20,402 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:20,402 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/C, priority=13, startTime=1732018699886; duration=0sec 2024-11-19T12:18:20,402 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:20,402 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:C 2024-11-19T12:18:20,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018760430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018760430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018760430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018760431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018760435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,493 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/cd31b51f19b9494dad88c9b1fdc3ac1b 2024-11-19T12:18:20,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/74b69e0cb5c844ec9ec1d2b3eaa4e2d6 is 50, key is test_row_0/B:col10/1732018698990/Put/seqid=0 2024-11-19T12:18:20,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741989_1165 (size=12001) 2024-11-19T12:18:20,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018760733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018760734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018760734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018760736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:20,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018760739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:20,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-19T12:18:20,918 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/74b69e0cb5c844ec9ec1d2b3eaa4e2d6 2024-11-19T12:18:20,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/0a590c569f424a449132749205963c45 is 50, key is test_row_0/C:col10/1732018698990/Put/seqid=0 2024-11-19T12:18:20,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741990_1166 (size=12001) 2024-11-19T12:18:20,932 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/0a590c569f424a449132749205963c45 2024-11-19T12:18:20,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/cd31b51f19b9494dad88c9b1fdc3ac1b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/cd31b51f19b9494dad88c9b1fdc3ac1b 2024-11-19T12:18:20,941 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/cd31b51f19b9494dad88c9b1fdc3ac1b, entries=150, sequenceid=77, filesize=30.2 K 2024-11-19T12:18:20,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/74b69e0cb5c844ec9ec1d2b3eaa4e2d6 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/74b69e0cb5c844ec9ec1d2b3eaa4e2d6 2024-11-19T12:18:20,948 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/74b69e0cb5c844ec9ec1d2b3eaa4e2d6, entries=150, sequenceid=77, filesize=11.7 K 2024-11-19T12:18:20,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/0a590c569f424a449132749205963c45 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/0a590c569f424a449132749205963c45 2024-11-19T12:18:20,953 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/0a590c569f424a449132749205963c45, entries=150, sequenceid=77, filesize=11.7 K 2024-11-19T12:18:20,954 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 9f6f39e9e25e92ed07f6efcc92916a02 in 920ms, sequenceid=77, compaction requested=false 2024-11-19T12:18:20,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:20,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:20,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-19T12:18:20,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-19T12:18:20,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-19T12:18:20,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2290 sec 2024-11-19T12:18:20,961 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.2350 sec 2024-11-19T12:18:21,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:21,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-19T12:18:21,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:21,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:21,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:21,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:21,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:21,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:21,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119fb76cf38aa4343839b9e6c09ae659f74_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018700124/Put/seqid=0 2024-11-19T12:18:21,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741991_1167 (size=12154) 2024-11-19T12:18:21,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018761254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018761254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018761255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018761255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018761258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018761360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018761360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018761362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018761362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018761363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018761565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018761565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018761566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018761566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018761566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,658 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:21,663 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119fb76cf38aa4343839b9e6c09ae659f74_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119fb76cf38aa4343839b9e6c09ae659f74_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:21,665 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/ae87b84aac4047638773a990d4a38e1f, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:21,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/ae87b84aac4047638773a990d4a38e1f is 175, key is test_row_0/A:col10/1732018700124/Put/seqid=0 2024-11-19T12:18:21,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741992_1168 (size=30955) 2024-11-19T12:18:21,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=47 2024-11-19T12:18:21,833 INFO [Thread-717 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-19T12:18:21,834 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:21,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-19T12:18:21,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-19T12:18:21,836 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:21,836 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:21,837 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:21,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018761869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018761870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018761870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018761872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:21,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018761872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:21,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-19T12:18:21,988 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:21,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-19T12:18:21,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:21,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:21,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:21,989 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:21,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:21,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:22,070 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/ae87b84aac4047638773a990d4a38e1f 2024-11-19T12:18:22,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/ca6f80c448a74c37805d47f3c584d55c is 50, key is test_row_0/B:col10/1732018700124/Put/seqid=0 2024-11-19T12:18:22,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741993_1169 (size=12001) 2024-11-19T12:18:22,095 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/ca6f80c448a74c37805d47f3c584d55c 2024-11-19T12:18:22,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/7698124922034a8fbbbc3d756a42a3bf is 50, key is test_row_0/C:col10/1732018700124/Put/seqid=0 2024-11-19T12:18:22,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741994_1170 (size=12001) 2024-11-19T12:18:22,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), 
to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/7698124922034a8fbbbc3d756a42a3bf 2024-11-19T12:18:22,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/ae87b84aac4047638773a990d4a38e1f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/ae87b84aac4047638773a990d4a38e1f 2024-11-19T12:18:22,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/ae87b84aac4047638773a990d4a38e1f, entries=150, sequenceid=94, filesize=30.2 K 2024-11-19T12:18:22,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/ca6f80c448a74c37805d47f3c584d55c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/ca6f80c448a74c37805d47f3c584d55c 2024-11-19T12:18:22,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/ca6f80c448a74c37805d47f3c584d55c, entries=150, sequenceid=94, filesize=11.7 K 2024-11-19T12:18:22,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/7698124922034a8fbbbc3d756a42a3bf as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/7698124922034a8fbbbc3d756a42a3bf 2024-11-19T12:18:22,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/7698124922034a8fbbbc3d756a42a3bf, entries=150, sequenceid=94, filesize=11.7 K 2024-11-19T12:18:22,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-19T12:18:22,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 9f6f39e9e25e92ed07f6efcc92916a02 in 900ms, sequenceid=94, compaction requested=true 2024-11-19T12:18:22,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:22,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:22,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:22,138 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:22,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:22,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:22,138 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:22,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:22,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:22,139 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:22,139 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:22,139 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/B is initiating minor compaction (all files) 2024-11-19T12:18:22,139 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/A is initiating minor compaction (all files) 2024-11-19T12:18:22,139 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/A in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
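The RegionTooBusyException entries above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking threshold, which HBase derives from the per-region flush size multiplied by the block multiplier; the 512.0 K limit in this run is consistent with a deliberately small flush size in the test configuration. A minimal sketch of the two settings involved, assuming the standard property names; the concrete values below are illustrative and are not taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            // Start from the default HBase configuration on the classpath.
            Configuration conf = HBaseConfiguration.create();
            // Per-region memstore flush threshold, in bytes (illustrative value).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // Writes are rejected with RegionTooBusyException once the memstore
            // exceeds flush.size * block.multiplier (128 KB * 4 = 512 KB here).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
        }
    }

Once a flush completes and the memstore drains, the region accepts writes again, which matches the way the rejections in this log appear in bursts between flushes.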
2024-11-19T12:18:22,139 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1721ad209483409c874f01180c631e2d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/cd31b51f19b9494dad88c9b1fdc3ac1b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/ae87b84aac4047638773a990d4a38e1f] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=90.8 K 2024-11-19T12:18:22,139 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:22,139 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1721ad209483409c874f01180c631e2d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/cd31b51f19b9494dad88c9b1fdc3ac1b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/ae87b84aac4047638773a990d4a38e1f] 2024-11-19T12:18:22,139 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/B in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:22,139 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d312a72c1c3e4daa847e23e7d837cb0d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/74b69e0cb5c844ec9ec1d2b3eaa4e2d6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/ca6f80c448a74c37805d47f3c584d55c] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=35.3 K 2024-11-19T12:18:22,140 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1721ad209483409c874f01180c631e2d, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018698810 2024-11-19T12:18:22,140 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d312a72c1c3e4daa847e23e7d837cb0d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018698810 2024-11-19T12:18:22,140 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd31b51f19b9494dad88c9b1fdc3ac1b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732018698983 2024-11-19T12:18:22,141 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 74b69e0cb5c844ec9ec1d2b3eaa4e2d6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732018698983 2024-11-19T12:18:22,141 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae87b84aac4047638773a990d4a38e1f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732018700115 2024-11-19T12:18:22,141 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting ca6f80c448a74c37805d47f3c584d55c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732018700115 2024-11-19T12:18:22,141 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:22,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-19T12:18:22,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
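From the client's perspective, each rejected Mutate call above surfaces as a RegionTooBusyException (possibly wrapped by the client's own retry machinery, depending on hbase.client.retries.number). A minimal client-side sketch of writing to the same table with a simple backoff loop; the row, family, and qualifier mirror the keys seen in this log, the value and the retry policy are purely illustrative and are not the behavior of this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);   // may be rejected while the memstore is over its limit
                        break;
                    } catch (RegionTooBusyException busy) {
                        if (attempt >= 5) {
                            throw busy;   // give up after a few attempts
                        }
                        // Back off so the in-flight flush can drain the memstore.
                        Thread.sleep(100L * attempt);
                    }
                }
            }
        }
    }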
2024-11-19T12:18:22,142 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-19T12:18:22,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:22,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:22,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:22,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:22,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:22,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:22,154 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#B#compaction#147 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:22,159 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/f2b1c623b352447d91bfa9010cd44b63 is 50, key is test_row_0/B:col10/1732018700124/Put/seqid=0 2024-11-19T12:18:22,172 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:22,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411197af415778c404cd187b676254c821088_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018701252/Put/seqid=0 2024-11-19T12:18:22,192 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411198150e31e48f0444cae6464a8c7d276cf_9f6f39e9e25e92ed07f6efcc92916a02 store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:22,195 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411198150e31e48f0444cae6464a8c7d276cf_9f6f39e9e25e92ed07f6efcc92916a02, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:22,195 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411198150e31e48f0444cae6464a8c7d276cf_9f6f39e9e25e92ed07f6efcc92916a02 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:22,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741995_1171 (size=12207) 2024-11-19T12:18:22,207 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/f2b1c623b352447d91bfa9010cd44b63 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/f2b1c623b352447d91bfa9010cd44b63 2024-11-19T12:18:22,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741996_1172 (size=12154) 2024-11-19T12:18:22,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:22,213 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/B of 9f6f39e9e25e92ed07f6efcc92916a02 into f2b1c623b352447d91bfa9010cd44b63(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:22,213 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:22,213 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/B, priority=13, startTime=1732018702138; duration=0sec 2024-11-19T12:18:22,213 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:22,213 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:B 2024-11-19T12:18:22,213 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:22,215 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:22,216 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/C is initiating minor compaction (all files) 2024-11-19T12:18:22,216 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/C in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
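The flush that produced these store files was driven from the master: the log shows the jenkins client requesting a table flush, the master storing FlushTableProcedure pid=49, and FlushRegionProcedure pid=50 being dispatched to the region server. In this build that is the path taken by an ordinary Admin flush request; a minimal sketch follows, with the compaction call included only to illustrate the related Admin API rather than anything this test performs explicitly:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // Ask the master to flush every region of the table; in this log the
                // same request shows up as FlushTableProcedure / FlushRegionProcedure.
                admin.flush(table);
                // Optionally request a compaction of the freshly flushed store files,
                // analogous to the minor compactions the flusher queued above.
                admin.compact(table);
            }
        }
    }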
2024-11-19T12:18:22,216 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/eea27a4b1fef46ad805105de5dcfad2f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/0a590c569f424a449132749205963c45, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/7698124922034a8fbbbc3d756a42a3bf] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=35.3 K 2024-11-19T12:18:22,217 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting eea27a4b1fef46ad805105de5dcfad2f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018698810 2024-11-19T12:18:22,218 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a590c569f424a449132749205963c45, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732018698983 2024-11-19T12:18:22,219 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 7698124922034a8fbbbc3d756a42a3bf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732018700115 2024-11-19T12:18:22,219 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411197af415778c404cd187b676254c821088_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411197af415778c404cd187b676254c821088_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:22,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/b64339a00d7547ef9788479741536afc, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:22,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/b64339a00d7547ef9788479741536afc is 175, key is test_row_0/A:col10/1732018701252/Put/seqid=0 2024-11-19T12:18:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741997_1173 (size=4469) 2024-11-19T12:18:22,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741998_1174 (size=30955) 2024-11-19T12:18:22,234 INFO 
[RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#C#compaction#150 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:22,235 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/e39a8307a7104e71b5e3d937c9c20d6b is 50, key is test_row_0/C:col10/1732018700124/Put/seqid=0 2024-11-19T12:18:22,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741999_1175 (size=12207) 2024-11-19T12:18:22,271 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/e39a8307a7104e71b5e3d937c9c20d6b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/e39a8307a7104e71b5e3d937c9c20d6b 2024-11-19T12:18:22,276 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/C of 9f6f39e9e25e92ed07f6efcc92916a02 into e39a8307a7104e71b5e3d937c9c20d6b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:22,276 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:22,276 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/C, priority=13, startTime=1732018702138; duration=0sec 2024-11-19T12:18:22,276 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:22,276 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:C 2024-11-19T12:18:22,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:22,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:22,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018762410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018762410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018762410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018762411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018762411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-19T12:18:22,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018762514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018762514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018762514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018762515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018762515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,631 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=115, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/b64339a00d7547ef9788479741536afc 2024-11-19T12:18:22,632 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#A#compaction#148 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:22,632 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/264abe0e0dc84c4f910f0aec9ceed26d is 175, key is test_row_0/A:col10/1732018700124/Put/seqid=0 2024-11-19T12:18:22,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742000_1176 (size=31161) 2024-11-19T12:18:22,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/d45f9278318d4fd3b5a561f35fbda132 is 50, key is test_row_0/B:col10/1732018701252/Put/seqid=0 2024-11-19T12:18:22,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742001_1177 (size=12001) 2024-11-19T12:18:22,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018762716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018762717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018762718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018762718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:22,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018762718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:22,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-19T12:18:23,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018763018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018763022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018763023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018763024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018763024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,044 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/264abe0e0dc84c4f910f0aec9ceed26d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/264abe0e0dc84c4f910f0aec9ceed26d 2024-11-19T12:18:23,045 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/d45f9278318d4fd3b5a561f35fbda132 2024-11-19T12:18:23,051 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/A of 9f6f39e9e25e92ed07f6efcc92916a02 into 264abe0e0dc84c4f910f0aec9ceed26d(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:23,051 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:23,051 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/A, priority=13, startTime=1732018702137; duration=0sec 2024-11-19T12:18:23,051 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:23,051 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:A 2024-11-19T12:18:23,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/54b90538ebf9463692a485705a771f0d is 50, key is test_row_0/C:col10/1732018701252/Put/seqid=0 2024-11-19T12:18:23,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742002_1178 (size=12001) 2024-11-19T12:18:23,468 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/54b90538ebf9463692a485705a771f0d 2024-11-19T12:18:23,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/b64339a00d7547ef9788479741536afc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/b64339a00d7547ef9788479741536afc 2024-11-19T12:18:23,480 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/b64339a00d7547ef9788479741536afc, entries=150, sequenceid=115, filesize=30.2 K 2024-11-19T12:18:23,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/d45f9278318d4fd3b5a561f35fbda132 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d45f9278318d4fd3b5a561f35fbda132 2024-11-19T12:18:23,485 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d45f9278318d4fd3b5a561f35fbda132, entries=150, sequenceid=115, filesize=11.7 K 2024-11-19T12:18:23,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/54b90538ebf9463692a485705a771f0d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/54b90538ebf9463692a485705a771f0d 2024-11-19T12:18:23,490 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/54b90538ebf9463692a485705a771f0d, entries=150, sequenceid=115, filesize=11.7 K 2024-11-19T12:18:23,491 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 9f6f39e9e25e92ed07f6efcc92916a02 in 1349ms, sequenceid=115, compaction requested=false 2024-11-19T12:18:23,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:23,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:23,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-19T12:18:23,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-19T12:18:23,493 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-19T12:18:23,493 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6550 sec 2024-11-19T12:18:23,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.6590 sec 2024-11-19T12:18:23,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:23,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-19T12:18:23,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:23,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:23,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:23,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:23,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:23,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:23,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119c47606c809424a759adaa1cf15dfd3ed_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018702410/Put/seqid=0 2024-11-19T12:18:23,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018763540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018763543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018763543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018763543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018763544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742003_1179 (size=14694) 2024-11-19T12:18:23,562 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:23,567 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119c47606c809424a759adaa1cf15dfd3ed_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119c47606c809424a759adaa1cf15dfd3ed_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:23,568 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f677d0e937c44833b087a820951f8c06, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:23,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f677d0e937c44833b087a820951f8c06 is 175, key is test_row_0/A:col10/1732018702410/Put/seqid=0 2024-11-19T12:18:23,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is 
added to blk_1073742004_1180 (size=39649) 2024-11-19T12:18:23,592 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f677d0e937c44833b087a820951f8c06 2024-11-19T12:18:23,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/355ad2c7378945808eb0626ab883f566 is 50, key is test_row_0/B:col10/1732018702410/Put/seqid=0 2024-11-19T12:18:23,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742005_1181 (size=12101) 2024-11-19T12:18:23,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018763645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018763646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018763647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018763647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018763647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018763847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018763851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018763851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018763851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:23,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018763851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:23,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-19T12:18:23,940 INFO [Thread-717 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-19T12:18:23,941 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:23,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-19T12:18:23,942 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:23,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-19T12:18:23,943 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:23,943 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:24,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/355ad2c7378945808eb0626ab883f566 2024-11-19T12:18:24,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/8014a1cc5f3d44aca153d14f2082b6a7 is 50, key is test_row_0/C:col10/1732018702410/Put/seqid=0 2024-11-19T12:18:24,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742006_1182 (size=12101) 2024-11-19T12:18:24,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-19T12:18:24,095 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:24,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-19T12:18:24,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:24,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:24,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:24,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:24,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:24,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:24,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018764151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018764155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018764155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018764156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018764156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-19T12:18:24,248 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:24,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-19T12:18:24,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:24,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:24,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:24,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:24,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:24,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:24,401 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:24,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-19T12:18:24,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:24,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:24,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:24,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:24,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:24,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:24,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/8014a1cc5f3d44aca153d14f2082b6a7 2024-11-19T12:18:24,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f677d0e937c44833b087a820951f8c06 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f677d0e937c44833b087a820951f8c06 2024-11-19T12:18:24,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f677d0e937c44833b087a820951f8c06, entries=200, sequenceid=135, filesize=38.7 K 2024-11-19T12:18:24,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/355ad2c7378945808eb0626ab883f566 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/355ad2c7378945808eb0626ab883f566 2024-11-19T12:18:24,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/355ad2c7378945808eb0626ab883f566, entries=150, sequenceid=135, filesize=11.8 K 2024-11-19T12:18:24,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/8014a1cc5f3d44aca153d14f2082b6a7 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8014a1cc5f3d44aca153d14f2082b6a7 2024-11-19T12:18:24,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8014a1cc5f3d44aca153d14f2082b6a7, entries=150, sequenceid=135, filesize=11.8 K 2024-11-19T12:18:24,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 9f6f39e9e25e92ed07f6efcc92916a02 in 915ms, sequenceid=135, compaction requested=true 2024-11-19T12:18:24,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:24,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:24,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:24,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:24,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:24,438 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:24,438 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:24,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:24,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:24,439 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:24,439 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101765 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:24,439 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/B is initiating minor compaction (all files) 2024-11-19T12:18:24,439 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/A is initiating minor compaction (all files) 2024-11-19T12:18:24,439 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/B in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:24,439 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/A in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:24,439 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/f2b1c623b352447d91bfa9010cd44b63, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d45f9278318d4fd3b5a561f35fbda132, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/355ad2c7378945808eb0626ab883f566] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=35.5 K 2024-11-19T12:18:24,440 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/264abe0e0dc84c4f910f0aec9ceed26d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/b64339a00d7547ef9788479741536afc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f677d0e937c44833b087a820951f8c06] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=99.4 K 2024-11-19T12:18:24,440 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:24,440 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/264abe0e0dc84c4f910f0aec9ceed26d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/b64339a00d7547ef9788479741536afc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f677d0e937c44833b087a820951f8c06] 2024-11-19T12:18:24,440 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting f2b1c623b352447d91bfa9010cd44b63, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732018700115 2024-11-19T12:18:24,440 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 264abe0e0dc84c4f910f0aec9ceed26d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732018700115 2024-11-19T12:18:24,441 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d45f9278318d4fd3b5a561f35fbda132, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732018701252 2024-11-19T12:18:24,441 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting b64339a00d7547ef9788479741536afc, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732018701252 2024-11-19T12:18:24,441 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 355ad2c7378945808eb0626ab883f566, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018702383 2024-11-19T12:18:24,441 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting f677d0e937c44833b087a820951f8c06, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018702377 2024-11-19T12:18:24,448 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:24,449 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#B#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:24,450 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/e03716afc772476ea60406d9df0b1a7e is 50, key is test_row_0/B:col10/1732018702410/Put/seqid=0 2024-11-19T12:18:24,451 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411194f5baa65bb9d47cfa464b08a7a2d00f2_9f6f39e9e25e92ed07f6efcc92916a02 store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:24,454 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411194f5baa65bb9d47cfa464b08a7a2d00f2_9f6f39e9e25e92ed07f6efcc92916a02, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:24,454 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411194f5baa65bb9d47cfa464b08a7a2d00f2_9f6f39e9e25e92ed07f6efcc92916a02 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:24,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742007_1183 (size=12409) 2024-11-19T12:18:24,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742008_1184 (size=4469) 2024-11-19T12:18:24,466 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/e03716afc772476ea60406d9df0b1a7e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/e03716afc772476ea60406d9df0b1a7e 2024-11-19T12:18:24,471 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/B of 9f6f39e9e25e92ed07f6efcc92916a02 into e03716afc772476ea60406d9df0b1a7e(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:24,471 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:24,471 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/B, priority=13, startTime=1732018704438; duration=0sec 2024-11-19T12:18:24,471 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:24,471 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:B 2024-11-19T12:18:24,471 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:24,472 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:24,472 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/C is initiating minor compaction (all files) 2024-11-19T12:18:24,472 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/C in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:24,472 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/e39a8307a7104e71b5e3d937c9c20d6b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/54b90538ebf9463692a485705a771f0d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8014a1cc5f3d44aca153d14f2082b6a7] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=35.5 K 2024-11-19T12:18:24,473 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting e39a8307a7104e71b5e3d937c9c20d6b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732018700115 2024-11-19T12:18:24,473 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 54b90538ebf9463692a485705a771f0d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732018701252 2024-11-19T12:18:24,474 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 8014a1cc5f3d44aca153d14f2082b6a7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018702383 2024-11-19T12:18:24,482 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
9f6f39e9e25e92ed07f6efcc92916a02#C#compaction#158 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:24,483 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/1ab41380d77641bcb9541fbb05caf105 is 50, key is test_row_0/C:col10/1732018702410/Put/seqid=0 2024-11-19T12:18:24,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742009_1185 (size=12409) 2024-11-19T12:18:24,492 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/1ab41380d77641bcb9541fbb05caf105 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/1ab41380d77641bcb9541fbb05caf105 2024-11-19T12:18:24,498 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/C of 9f6f39e9e25e92ed07f6efcc92916a02 into 1ab41380d77641bcb9541fbb05caf105(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:24,498 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:24,498 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/C, priority=13, startTime=1732018704438; duration=0sec 2024-11-19T12:18:24,498 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:24,498 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:C 2024-11-19T12:18:24,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-19T12:18:24,554 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:24,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-19T12:18:24,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:24,555 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-19T12:18:24,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:24,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:24,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:24,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:24,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:24,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:24,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196c919998fd5b4bea881df3f7615685d8_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018703528/Put/seqid=0 2024-11-19T12:18:24,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742010_1186 (size=12304) 2024-11-19T12:18:24,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:24,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:24,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018764668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018764669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018764670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018764671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018764671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018764772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018764772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018764772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018764773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018764773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,864 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#A#compaction#157 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:24,865 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f2118d10415c4ad284dab1c699a20f8e is 175, key is test_row_0/A:col10/1732018702410/Put/seqid=0 2024-11-19T12:18:24,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742011_1187 (size=31363) 2024-11-19T12:18:24,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:24,973 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196c919998fd5b4bea881df3f7615685d8_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196c919998fd5b4bea881df3f7615685d8_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:24,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018764975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018764975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/03d6decdf837410892cca396234647d5, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:24,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/03d6decdf837410892cca396234647d5 is 175, key is test_row_0/A:col10/1732018703528/Put/seqid=0 2024-11-19T12:18:24,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018764975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018764977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:24,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018764977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:24,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742012_1188 (size=31105) 2024-11-19T12:18:24,983 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=156, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/03d6decdf837410892cca396234647d5 2024-11-19T12:18:24,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/fd68cd3dca5140f0964ddb778d98d500 is 50, key is test_row_0/B:col10/1732018703528/Put/seqid=0 2024-11-19T12:18:24,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742013_1189 (size=12151) 2024-11-19T12:18:24,995 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/fd68cd3dca5140f0964ddb778d98d500 2024-11-19T12:18:25,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/a6e11fd7a0af43428dc352b9c593ff89 is 50, key is test_row_0/C:col10/1732018703528/Put/seqid=0 2024-11-19T12:18:25,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added 
to blk_1073742014_1190 (size=12151) 2024-11-19T12:18:25,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-19T12:18:25,276 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f2118d10415c4ad284dab1c699a20f8e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f2118d10415c4ad284dab1c699a20f8e 2024-11-19T12:18:25,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018765279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018765280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018765280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018765280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,283 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/A of 9f6f39e9e25e92ed07f6efcc92916a02 into f2118d10415c4ad284dab1c699a20f8e(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:25,283 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:25,283 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/A, priority=13, startTime=1732018704438; duration=0sec 2024-11-19T12:18:25,283 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:25,283 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:A 2024-11-19T12:18:25,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018765283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,408 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/a6e11fd7a0af43428dc352b9c593ff89 2024-11-19T12:18:25,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/03d6decdf837410892cca396234647d5 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/03d6decdf837410892cca396234647d5 2024-11-19T12:18:25,418 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/03d6decdf837410892cca396234647d5, entries=150, sequenceid=156, filesize=30.4 K 2024-11-19T12:18:25,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/fd68cd3dca5140f0964ddb778d98d500 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/fd68cd3dca5140f0964ddb778d98d500 2024-11-19T12:18:25,423 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/fd68cd3dca5140f0964ddb778d98d500, entries=150, sequenceid=156, filesize=11.9 K 2024-11-19T12:18:25,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/a6e11fd7a0af43428dc352b9c593ff89 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/a6e11fd7a0af43428dc352b9c593ff89 2024-11-19T12:18:25,428 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/a6e11fd7a0af43428dc352b9c593ff89, entries=150, sequenceid=156, filesize=11.9 K 2024-11-19T12:18:25,429 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 9f6f39e9e25e92ed07f6efcc92916a02 in 874ms, sequenceid=156, compaction requested=false 2024-11-19T12:18:25,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:25,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:25,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-19T12:18:25,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-19T12:18:25,432 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-19T12:18:25,432 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4870 sec 2024-11-19T12:18:25,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.4910 sec 2024-11-19T12:18:25,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:25,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-19T12:18:25,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:25,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:25,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:25,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:25,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:25,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:25,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119be061c0e19894cb8bb878eacc098622d_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018704669/Put/seqid=0 2024-11-19T12:18:25,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018765795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018765796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018765796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018765798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018765798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742015_1191 (size=14794) 2024-11-19T12:18:25,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018765899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018765899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018765900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018765901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:25,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:25,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018765902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-19T12:18:26,047 INFO [Thread-717 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-19T12:18:26,048 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:26,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-19T12:18:26,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-19T12:18:26,050 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:26,050 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:26,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:26,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018766101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018766102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018766103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018766104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018766104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-19T12:18:26,201 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:26,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-19T12:18:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,202 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,207 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:26,212 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119be061c0e19894cb8bb878eacc098622d_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119be061c0e19894cb8bb878eacc098622d_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:26,213 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/7990966dc2ff46ca80bbef0d60524544, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:26,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/7990966dc2ff46ca80bbef0d60524544 is 175, key is test_row_0/A:col10/1732018704669/Put/seqid=0 2024-11-19T12:18:26,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742016_1192 (size=39749) 2024-11-19T12:18:26,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-19T12:18:26,354 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:26,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-19T12:18:26,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:26,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
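The flush that keeps failing here was requested by the test client through the master (Operation: FLUSH on default:TestAcidGuarantees, procId 51 and then pid=53), while the region server rejects each FlushRegionCallable because the region is already flushing. For reference, a minimal sketch of issuing such a flush through the public Admin API is shown below; the class name is hypothetical and it assumes an hbase-site.xml for this cluster is on the classpath.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical class name; not part of the test harness shown in this log.
public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml describing the cluster is on the classpath.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Waits for the master-side flush procedure (cf. FlushTableProcedure pid=53 above) to finish.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}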
2024-11-19T12:18:26,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018766403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018766406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018766407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018766408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018766408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:26,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-19T12:18:26,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:26,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,619 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/7990966dc2ff46ca80bbef0d60524544 2024-11-19T12:18:26,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/d8c77dd9ce054861b03a0c0e4630c911 is 50, key is test_row_0/B:col10/1732018704669/Put/seqid=0 2024-11-19T12:18:26,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742017_1193 (size=12151) 2024-11-19T12:18:26,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/d8c77dd9ce054861b03a0c0e4630c911 2024-11-19T12:18:26,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/57c75dbdc49f44dc8cf70b84d9b2dcb9 is 50, key is test_row_0/C:col10/1732018704669/Put/seqid=0 2024-11-19T12:18:26,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-19T12:18:26,660 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:26,661 
DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-19T12:18:26,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:26,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
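The RegionTooBusyException entries throughout this stretch come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit (512.0 K here). In a stock configuration that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the concrete values used by this test are not visible in the log, so the sketch below uses illustrative numbers that merely happen to reproduce 512 K.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical helper; the actual settings of TestAcidGuarantees are not shown in this log.
public class MemstoreBlockingLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // 128 KB flush threshold (illustrative)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x the flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("per-region blocking limit = " + blockingLimit + " bytes"); // 524288 bytes = 512 K
  }
}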
2024-11-19T12:18:26,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742018_1194 (size=12151) 2024-11-19T12:18:26,678 INFO [master/af314c41f984:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T12:18:26,678 INFO [master/af314c41f984:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-19T12:18:26,813 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:26,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-19T12:18:26,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:26,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,814 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:26,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018766907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018766913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018766914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018766915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:26,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018766917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:26,966 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:26,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-19T12:18:26,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:26,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:26,966 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:26,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:27,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/57c75dbdc49f44dc8cf70b84d9b2dcb9 2024-11-19T12:18:27,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/7990966dc2ff46ca80bbef0d60524544 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/7990966dc2ff46ca80bbef0d60524544 2024-11-19T12:18:27,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/7990966dc2ff46ca80bbef0d60524544, entries=200, sequenceid=175, filesize=38.8 K 2024-11-19T12:18:27,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/d8c77dd9ce054861b03a0c0e4630c911 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d8c77dd9ce054861b03a0c0e4630c911 2024-11-19T12:18:27,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d8c77dd9ce054861b03a0c0e4630c911, entries=150, 
sequenceid=175, filesize=11.9 K 2024-11-19T12:18:27,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/57c75dbdc49f44dc8cf70b84d9b2dcb9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/57c75dbdc49f44dc8cf70b84d9b2dcb9 2024-11-19T12:18:27,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/57c75dbdc49f44dc8cf70b84d9b2dcb9, entries=150, sequenceid=175, filesize=11.9 K 2024-11-19T12:18:27,088 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 9f6f39e9e25e92ed07f6efcc92916a02 in 1304ms, sequenceid=175, compaction requested=true 2024-11-19T12:18:27,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:27,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:27,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:27,089 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:27,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:27,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:27,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:27,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:27,089 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:27,090 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102217 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:27,090 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/A is initiating minor compaction (all files) 2024-11-19T12:18:27,090 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of 
size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:27,090 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/A in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:27,090 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/B is initiating minor compaction (all files) 2024-11-19T12:18:27,090 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f2118d10415c4ad284dab1c699a20f8e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/03d6decdf837410892cca396234647d5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/7990966dc2ff46ca80bbef0d60524544] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=99.8 K 2024-11-19T12:18:27,090 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/B in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:27,090 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:27,090 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/e03716afc772476ea60406d9df0b1a7e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/fd68cd3dca5140f0964ddb778d98d500, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d8c77dd9ce054861b03a0c0e4630c911] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=35.9 K 2024-11-19T12:18:27,090 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f2118d10415c4ad284dab1c699a20f8e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/03d6decdf837410892cca396234647d5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/7990966dc2ff46ca80bbef0d60524544] 2024-11-19T12:18:27,091 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting e03716afc772476ea60406d9df0b1a7e, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018702383 2024-11-19T12:18:27,091 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2118d10415c4ad284dab1c699a20f8e, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018702383 2024-11-19T12:18:27,091 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting fd68cd3dca5140f0964ddb778d98d500, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732018703528 2024-11-19T12:18:27,091 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03d6decdf837410892cca396234647d5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732018703528 2024-11-19T12:18:27,092 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d8c77dd9ce054861b03a0c0e4630c911, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018704669 2024-11-19T12:18:27,092 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7990966dc2ff46ca80bbef0d60524544, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018704663 2024-11-19T12:18:27,100 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:27,101 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#B#compaction#165 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:27,102 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/76889f7515a549358a014314b0a185d1 is 50, key is test_row_0/B:col10/1732018704669/Put/seqid=0 2024-11-19T12:18:27,103 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241119fe896727325543978ee1113444d641d2_9f6f39e9e25e92ed07f6efcc92916a02 store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:27,105 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241119fe896727325543978ee1113444d641d2_9f6f39e9e25e92ed07f6efcc92916a02, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:27,105 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119fe896727325543978ee1113444d641d2_9f6f39e9e25e92ed07f6efcc92916a02 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:27,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742020_1196 (size=4469) 2024-11-19T12:18:27,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742019_1195 (size=12561) 2024-11-19T12:18:27,118 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:27,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-19T12:18:27,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:27,119 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-19T12:18:27,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:27,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:27,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:27,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:27,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:27,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:27,124 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/76889f7515a549358a014314b0a185d1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/76889f7515a549358a014314b0a185d1 2024-11-19T12:18:27,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119365fc5b0a64f47e6b6e6f5bd35609108_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018705789/Put/seqid=0 2024-11-19T12:18:27,133 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/B of 9f6f39e9e25e92ed07f6efcc92916a02 into 76889f7515a549358a014314b0a185d1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:27,133 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:27,133 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/B, priority=13, startTime=1732018707089; duration=0sec 2024-11-19T12:18:27,133 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:27,133 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:B 2024-11-19T12:18:27,133 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:27,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742021_1197 (size=12304) 2024-11-19T12:18:27,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:27,136 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:27,136 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/C is initiating minor compaction (all files) 2024-11-19T12:18:27,136 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/C in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:27,136 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/1ab41380d77641bcb9541fbb05caf105, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/a6e11fd7a0af43428dc352b9c593ff89, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/57c75dbdc49f44dc8cf70b84d9b2dcb9] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=35.9 K 2024-11-19T12:18:27,137 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ab41380d77641bcb9541fbb05caf105, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018702383 2024-11-19T12:18:27,137 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting a6e11fd7a0af43428dc352b9c593ff89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732018703528 2024-11-19T12:18:27,138 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 57c75dbdc49f44dc8cf70b84d9b2dcb9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018704669 2024-11-19T12:18:27,139 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119365fc5b0a64f47e6b6e6f5bd35609108_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119365fc5b0a64f47e6b6e6f5bd35609108_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:27,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/bd3f5ea2d1c441e59184e0d942f11ce4, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:27,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/bd3f5ea2d1c441e59184e0d942f11ce4 is 175, key is test_row_0/A:col10/1732018705789/Put/seqid=0 2024-11-19T12:18:27,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742022_1198 (size=31105) 2024-11-19T12:18:27,150 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#C#compaction#168 average throughput is 6.55 
MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:27,150 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/d2306a828544432a93dc23449d6cc5cb is 50, key is test_row_0/C:col10/1732018704669/Put/seqid=0 2024-11-19T12:18:27,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-19T12:18:27,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742023_1199 (size=12561) 2024-11-19T12:18:27,514 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#A#compaction#166 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:27,515 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/47adb9b448524583ad1b21c473fc23b4 is 175, key is test_row_0/A:col10/1732018704669/Put/seqid=0 2024-11-19T12:18:27,551 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=193, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/bd3f5ea2d1c441e59184e0d942f11ce4 2024-11-19T12:18:27,563 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/d2306a828544432a93dc23449d6cc5cb as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/d2306a828544432a93dc23449d6cc5cb 2024-11-19T12:18:27,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/17d2cf6c7f4c4005b20b6fa37e88385f is 50, key is test_row_0/B:col10/1732018705789/Put/seqid=0 2024-11-19T12:18:27,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742024_1200 (size=31515) 2024-11-19T12:18:27,573 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/C of 9f6f39e9e25e92ed07f6efcc92916a02 into d2306a828544432a93dc23449d6cc5cb(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:27,573 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:27,573 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/C, priority=13, startTime=1732018707089; duration=0sec 2024-11-19T12:18:27,573 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:27,573 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:C 2024-11-19T12:18:27,576 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/47adb9b448524583ad1b21c473fc23b4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/47adb9b448524583ad1b21c473fc23b4 2024-11-19T12:18:27,582 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/A of 9f6f39e9e25e92ed07f6efcc92916a02 into 47adb9b448524583ad1b21c473fc23b4(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:27,582 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:27,582 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/A, priority=13, startTime=1732018707089; duration=0sec 2024-11-19T12:18:27,582 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:27,582 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:A 2024-11-19T12:18:27,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742025_1201 (size=12151) 2024-11-19T12:18:27,588 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/17d2cf6c7f4c4005b20b6fa37e88385f 2024-11-19T12:18:27,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/2b23f88d5082495e9fb81511722867e5 is 50, key is test_row_0/C:col10/1732018705789/Put/seqid=0 2024-11-19T12:18:27,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742026_1202 (size=12151) 2024-11-19T12:18:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:27,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:27,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:27,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018767970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:27,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018767970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:27,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018767971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:27,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:27,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018767973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:27,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:27,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018767973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,007 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/2b23f88d5082495e9fb81511722867e5 2024-11-19T12:18:28,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/bd3f5ea2d1c441e59184e0d942f11ce4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/bd3f5ea2d1c441e59184e0d942f11ce4 2024-11-19T12:18:28,018 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/bd3f5ea2d1c441e59184e0d942f11ce4, entries=150, sequenceid=193, filesize=30.4 K 2024-11-19T12:18:28,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/17d2cf6c7f4c4005b20b6fa37e88385f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/17d2cf6c7f4c4005b20b6fa37e88385f 2024-11-19T12:18:28,025 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/17d2cf6c7f4c4005b20b6fa37e88385f, entries=150, sequenceid=193, filesize=11.9 K 2024-11-19T12:18:28,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/2b23f88d5082495e9fb81511722867e5 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2b23f88d5082495e9fb81511722867e5 2024-11-19T12:18:28,032 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2b23f88d5082495e9fb81511722867e5, entries=150, sequenceid=193, filesize=11.9 K 2024-11-19T12:18:28,033 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 9f6f39e9e25e92ed07f6efcc92916a02 in 914ms, sequenceid=193, compaction requested=false 2024-11-19T12:18:28,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:28,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:28,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-19T12:18:28,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-19T12:18:28,036 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-19T12:18:28,036 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9840 sec 2024-11-19T12:18:28,038 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.9890 sec 2024-11-19T12:18:28,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:28,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-19T12:18:28,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:28,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:28,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:28,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:28,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:28,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:28,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196ea4f55377f14d6096598bdaf5cc8cf6_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018708078/Put/seqid=0 2024-11-19T12:18:28,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018768093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018768094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018768095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018768096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018768097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742027_1203 (size=12304) 2024-11-19T12:18:28,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-19T12:18:28,153 INFO [Thread-717 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-19T12:18:28,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:28,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-19T12:18:28,157 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:28,157 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:28,158 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:28,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-19T12:18:28,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018768199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018768200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018768200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018768200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018768202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-19T12:18:28,309 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:28,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:28,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:28,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:28,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:28,310 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:28,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018768402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018768404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018768405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018768404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018768406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-19T12:18:28,463 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:28,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:28,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:28,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:28,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:28,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:28,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,525 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:28,531 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196ea4f55377f14d6096598bdaf5cc8cf6_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196ea4f55377f14d6096598bdaf5cc8cf6_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:28,532 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/2685c106b5864f5cac1245785c853bac, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:28,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/2685c106b5864f5cac1245785c853bac is 175, key is test_row_0/A:col10/1732018708078/Put/seqid=0 2024-11-19T12:18:28,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742028_1204 (size=31105) 2024-11-19T12:18:28,618 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:28,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:28,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:28,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:28,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:28,619 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018768708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018768710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018768711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018768711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:28,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018768708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:28,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-19T12:18:28,771 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:28,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:28,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:28,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:28,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:28,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,925 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:28,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:28,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:28,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:28,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:28,926 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:28,975 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=217, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/2685c106b5864f5cac1245785c853bac 2024-11-19T12:18:28,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/01f584b09c6b41d293b450fc18b98ec4 is 50, key is test_row_0/B:col10/1732018708078/Put/seqid=0 2024-11-19T12:18:29,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742029_1205 (size=12151) 2024-11-19T12:18:29,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/01f584b09c6b41d293b450fc18b98ec4 2024-11-19T12:18:29,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/2ab31cd85d134acabd70d813f23a3b26 is 50, key is test_row_0/C:col10/1732018708078/Put/seqid=0 2024-11-19T12:18:29,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742030_1206 (size=12151) 2024-11-19T12:18:29,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=217 (bloomFilter=true), 
to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/2ab31cd85d134acabd70d813f23a3b26 2024-11-19T12:18:29,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/2685c106b5864f5cac1245785c853bac as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2685c106b5864f5cac1245785c853bac 2024-11-19T12:18:29,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2685c106b5864f5cac1245785c853bac, entries=150, sequenceid=217, filesize=30.4 K 2024-11-19T12:18:29,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/01f584b09c6b41d293b450fc18b98ec4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/01f584b09c6b41d293b450fc18b98ec4 2024-11-19T12:18:29,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/01f584b09c6b41d293b450fc18b98ec4, entries=150, sequenceid=217, filesize=11.9 K 2024-11-19T12:18:29,078 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:29,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/2ab31cd85d134acabd70d813f23a3b26 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2ab31cd85d134acabd70d813f23a3b26 2024-11-19T12:18:29,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:29,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:29,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
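The RegionTooBusyException warnings earlier in this stretch come from HRegion.checkResources: once a region's memstore grows past its blocking limit (reported here as 512.0 K) while the flush that would drain it is still running, new mutations are rejected until the flush completes. That blocking limit is derived from two configuration values. The sketch below only reads them and prints the resulting limit; the suggestion that this test uses a roughly 128 K flush size with the default multiplier of 4 is an inference from the 512 K figure, not something stated in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush threshold and the multiplier that defines the blocking limit.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        // Writes are rejected with RegionTooBusyException once the region's memstore
        // exceeds flushSize * multiplier; the 512 K limit in the log suggests a
        // deliberately small flush size for the test (e.g. 128 K * 4 -- an assumption).
        System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
      }
    }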
2024-11-19T12:18:29,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2ab31cd85d134acabd70d813f23a3b26, entries=150, sequenceid=217, filesize=11.9 K 2024-11-19T12:18:29,087 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 9f6f39e9e25e92ed07f6efcc92916a02 in 1008ms, sequenceid=217, compaction requested=true 2024-11-19T12:18:29,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:29,088 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:29,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:29,089 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:29,089 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/A is initiating minor compaction (all files) 2024-11-19T12:18:29,089 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/A in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
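The "Exploring compaction algorithm" lines that follow refer to ExploringCompactionPolicy, which enumerates contiguous subsets of the eligible store files and keeps a candidate only if every file is "in ratio", i.e. no larger than the combined size of the other files in the candidate times hbase.hstore.compaction.ratio (1.2 by default). The helper below is a simplified, stand-alone illustration of that check using the three A-family file sizes from this log; it is not the HBase implementation and ignores off-peak ratios, min/max file counts, and the other selection rules.

    import java.util.List;

    public class InRatioSketch {
      // Simplified version of the "in ratio" test applied to a candidate set:
      // a file is acceptable only if it is not disproportionately larger than the rest.
      static boolean allInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        return sizes.stream().allMatch(s -> s <= (total - s) * ratio);
      }

      public static void main(String[] args) {
        // Approximate sizes of 47adb9b4..., bd3f5ea2..., 2685c106... (93,725 bytes total
        // per the log; only the last file's exact size, 31,105 bytes, appears in this excerpt).
        List<Long> aFiles = List.of(31515L, 31105L, 31105L);
        // Prints true: all three files pass, matching the "3 files of size 93725" selection.
        System.out.println(allInRatio(aFiles, 1.2));
      }
    }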
2024-11-19T12:18:29,089 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/47adb9b448524583ad1b21c473fc23b4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/bd3f5ea2d1c441e59184e0d942f11ce4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2685c106b5864f5cac1245785c853bac] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=91.5 K 2024-11-19T12:18:29,089 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,089 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/47adb9b448524583ad1b21c473fc23b4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/bd3f5ea2d1c441e59184e0d942f11ce4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2685c106b5864f5cac1245785c853bac] 2024-11-19T12:18:29,092 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47adb9b448524583ad1b21c473fc23b4, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018704669 2024-11-19T12:18:29,093 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd3f5ea2d1c441e59184e0d942f11ce4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732018705789 2024-11-19T12:18:29,094 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2685c106b5864f5cac1245785c853bac, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732018707970 2024-11-19T12:18:29,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:29,095 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:29,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:29,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small 
Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:29,097 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:29,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:29,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:29,097 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/B is initiating minor compaction (all files) 2024-11-19T12:18:29,097 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/B in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,097 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/76889f7515a549358a014314b0a185d1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/17d2cf6c7f4c4005b20b6fa37e88385f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/01f584b09c6b41d293b450fc18b98ec4] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=36.0 K 2024-11-19T12:18:29,097 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 76889f7515a549358a014314b0a185d1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018704669 2024-11-19T12:18:29,099 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 17d2cf6c7f4c4005b20b6fa37e88385f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732018705789 2024-11-19T12:18:29,100 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 01f584b09c6b41d293b450fc18b98ec4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732018707970 2024-11-19T12:18:29,113 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:29,126 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#B#compaction#175 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:29,126 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/63accf0f99f34762aa6ce91d7043ff5e is 50, key is test_row_0/B:col10/1732018708078/Put/seqid=0 2024-11-19T12:18:29,129 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241119239280a8bd574903aa22d952bb8db60f_9f6f39e9e25e92ed07f6efcc92916a02 store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:29,131 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241119239280a8bd574903aa22d952bb8db60f_9f6f39e9e25e92ed07f6efcc92916a02, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:29,131 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119239280a8bd574903aa22d952bb8db60f_9f6f39e9e25e92ed07f6efcc92916a02 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:29,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742031_1207 (size=12663) 2024-11-19T12:18:29,175 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/63accf0f99f34762aa6ce91d7043ff5e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/63accf0f99f34762aa6ce91d7043ff5e 2024-11-19T12:18:29,181 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/B of 9f6f39e9e25e92ed07f6efcc92916a02 into 63accf0f99f34762aa6ce91d7043ff5e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
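The throttle.PressureAwareThroughputController line above shows compactions being rate-limited, with a current total limit of 50.00 MB/second; the controller scales the permitted compaction rate between a lower and an upper bound depending on flush and compaction pressure. The snippet below sets those bounds programmatically. The log confirms the regionserver.throttle package is in use, but the property names and the 50 MB/s / 100 MB/s values here are assumptions to verify against the running version, not values taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Pluggable controller for compaction throughput (the log shows the
        // PressureAware variant from the regionserver.throttle package in use).
        conf.set("hbase.regionserver.throughput.controller",
            "org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController");
        // Bounds between which the controller adjusts the allowed compaction rate.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("compaction throughput capped between 50 MB/s and 100 MB/s");
      }
    }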
2024-11-19T12:18:29,181 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:29,181 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/B, priority=13, startTime=1732018709095; duration=0sec 2024-11-19T12:18:29,182 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:29,182 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:B 2024-11-19T12:18:29,182 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:29,183 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:29,183 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/C is initiating minor compaction (all files) 2024-11-19T12:18:29,183 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/C in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,183 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/d2306a828544432a93dc23449d6cc5cb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2b23f88d5082495e9fb81511722867e5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2ab31cd85d134acabd70d813f23a3b26] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=36.0 K 2024-11-19T12:18:29,184 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d2306a828544432a93dc23449d6cc5cb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018704669 2024-11-19T12:18:29,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742032_1208 (size=4469) 2024-11-19T12:18:29,185 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b23f88d5082495e9fb81511722867e5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732018705789 2024-11-19T12:18:29,186 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ab31cd85d134acabd70d813f23a3b26, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=217, earliestPutTs=1732018707970 2024-11-19T12:18:29,196 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#C#compaction#176 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:29,196 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/03d4c042a43d4406942ef60517a61e0a is 50, key is test_row_0/C:col10/1732018708078/Put/seqid=0 2024-11-19T12:18:29,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742033_1209 (size=12663) 2024-11-19T12:18:29,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:29,212 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-19T12:18:29,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:29,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:29,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:29,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:29,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:29,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:29,213 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/03d4c042a43d4406942ef60517a61e0a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/03d4c042a43d4406942ef60517a61e0a 2024-11-19T12:18:29,223 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/C of 9f6f39e9e25e92ed07f6efcc92916a02 into 03d4c042a43d4406942ef60517a61e0a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
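Family A in this test is MOB-enabled, which is why its flushes go through HMobStore/DefaultMobStoreFlusher and its compactions through DefaultMobStoreCompactor: cells above the MOB threshold are written to separate MOB files under mobdir, while smaller cells stay in ordinary store files. Here the compactor aborts its freshly created MOB writer because no cell exceeds the threshold (the biggest cell is 175 bytes). A minimal sketch of declaring such a family through the admin API follows; the threshold value and table name are illustrative assumptions, not the test's actual settings.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // MOB-enabled family: cells larger than the threshold go to separate MOB files
          // (under mobdir); smaller cells stay in the regular store files.
          ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(102400L)   // 100 KB -- illustrative value, not the test's setting
              .build();
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("MobExample"))   // hypothetical table name
              .setColumnFamily(cfA)
              .build());
        }
      }
    }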
2024-11-19T12:18:29,223 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:29,223 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/C, priority=13, startTime=1732018709097; duration=0sec 2024-11-19T12:18:29,223 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:29,224 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:C 2024-11-19T12:18:29,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119ff88c177baa24083b9911f997eeaedb4_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018708089/Put/seqid=0 2024-11-19T12:18:29,233 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:29,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:29,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:29,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,234 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
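The repeating pid=56 blocks in this log are the master's flush procedure being re-dispatched to the region server: the server declines because a memstore flush is already in progress ("NOT flushing ... as already flushing"), reports the IOException back via RemoteProcedureResultReporter, and the master retries until the region becomes flushable, while a client polls pid=55 with "Checking to see if procedure is done". A request like the one below (a synchronous Admin flush, which the master turns into flush procedures of this kind) is the sort of call that starts the cycle; the exact trigger used by TestAcidGuarantees is not shown in this excerpt, so treat the snippet as illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush all memstores of the table; server-side this becomes
          // a flush procedure that may be retried while a flush is already running.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }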
2024-11-19T12:18:29,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018769244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018769244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018769245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018769245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018769247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742034_1210 (size=14794) 2024-11-19T12:18:29,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-19T12:18:29,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018769351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018769351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018769352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018769352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018769352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,385 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:29,385 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:29,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:29,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,386 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,538 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:29,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:29,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:29,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,539 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018769554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018769555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018769558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018769562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018769562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,588 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#A#compaction#174 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:29,589 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/62e58e979ed24d4aa5ec8856f6b2e0b8 is 175, key is test_row_0/A:col10/1732018708078/Put/seqid=0 2024-11-19T12:18:29,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742035_1211 (size=31617) 2024-11-19T12:18:29,657 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:29,663 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119ff88c177baa24083b9911f997eeaedb4_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119ff88c177baa24083b9911f997eeaedb4_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:29,668 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/6526a4f5d35a4f1db28d41a1bc837313, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:29,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/6526a4f5d35a4f1db28d41a1bc837313 is 175, key is test_row_0/A:col10/1732018708089/Put/seqid=0 2024-11-19T12:18:29,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742036_1212 (size=39749) 2024-11-19T12:18:29,690 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:29,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:29,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:29,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
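The repeated RegionTooBusyException entries are the region server applying memstore backpressure: HRegion.checkResources rejects further Mutate calls while the region's memstore sits over the blocking limit (512.0 K in this test) until the in-flight flush and compactions drain it. The stock HBase client already retries this exception internally; the sketch below only makes that backoff explicit, assuming client retries are configured low enough for the exception to reach the caller. Table, row, family, and qualifier are taken from the log; everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100L;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);   // accepted once flushes bring the memstore under the limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) {
            throw e;        // region stayed over the limit; give up
          }
          Thread.sleep(backoffMs);                   // let flush/compaction catch up
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
    }
  }
}

In the log this retry behaviour shows up as the same client connections (for example 172.17.0.2:35664) reappearing with higher callIds and later deadlines until the flush completes.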
2024-11-19T12:18:29,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,844 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:29,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:29,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:29,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:29,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:29,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018769858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018769862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018769864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018769866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:29,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:29,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018769869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:30,010 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:30,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:30,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:30,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:30,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:30,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,021 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/62e58e979ed24d4aa5ec8856f6b2e0b8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/62e58e979ed24d4aa5ec8856f6b2e0b8 2024-11-19T12:18:30,027 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/A of 9f6f39e9e25e92ed07f6efcc92916a02 into 62e58e979ed24d4aa5ec8856f6b2e0b8(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
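[Illustrative sketch, assuming the standard HBase client API; not taken from the test code or the log above.] The repeated pid=56 failures show the master's flush procedure re-dispatching FlushRegionCallable while the region answers "NOT flushing ... as already flushing"; a table-level flush of the kind being retried here can be requested through Admin.flush, roughly as follows (table name taken from the log, everything else assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a flush procedure for every region of the table.
      // If a region is already flushing, the region server can reject the remote
      // FlushRegionCallable and the procedure is retried, which is the pattern
      // visible in the pid=56 entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}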
2024-11-19T12:18:30,028 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:30,028 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/A, priority=13, startTime=1732018709088; duration=0sec 2024-11-19T12:18:30,028 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:30,028 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:A 2024-11-19T12:18:30,077 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/6526a4f5d35a4f1db28d41a1bc837313 2024-11-19T12:18:30,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/5008c44fb7a3403aa3363cf6af4f2676 is 50, key is test_row_0/B:col10/1732018708089/Put/seqid=0 2024-11-19T12:18:30,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742037_1213 (size=12151) 2024-11-19T12:18:30,163 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:30,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:30,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:30,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:30,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:30,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-19T12:18:30,317 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:30,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:30,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:30,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:30,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:30,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018770363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:30,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018770366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:30,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018770368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:30,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018770371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:30,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018770373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:30,470 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:30,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:30,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:30,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:30,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:30,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:30,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/5008c44fb7a3403aa3363cf6af4f2676 2024-11-19T12:18:30,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/38588620032b4ec1a659cabfdfc91715 is 50, key is test_row_0/C:col10/1732018708089/Put/seqid=0 2024-11-19T12:18:30,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742038_1214 (size=12151) 2024-11-19T12:18:30,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/38588620032b4ec1a659cabfdfc91715 2024-11-19T12:18:30,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/6526a4f5d35a4f1db28d41a1bc837313 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/6526a4f5d35a4f1db28d41a1bc837313 2024-11-19T12:18:30,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/6526a4f5d35a4f1db28d41a1bc837313, entries=200, sequenceid=234, filesize=38.8 K 2024-11-19T12:18:30,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/5008c44fb7a3403aa3363cf6af4f2676 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/5008c44fb7a3403aa3363cf6af4f2676 2024-11-19T12:18:30,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/5008c44fb7a3403aa3363cf6af4f2676, entries=150, sequenceid=234, filesize=11.9 K 2024-11-19T12:18:30,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/38588620032b4ec1a659cabfdfc91715 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/38588620032b4ec1a659cabfdfc91715 2024-11-19T12:18:30,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/38588620032b4ec1a659cabfdfc91715, entries=150, sequenceid=234, filesize=11.9 K 2024-11-19T12:18:30,528 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 9f6f39e9e25e92ed07f6efcc92916a02 in 1316ms, sequenceid=234, compaction requested=false 2024-11-19T12:18:30,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:30,624 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:30,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-19T12:18:30,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
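[Illustrative sketch, assuming standard HBase configuration keys; the concrete values are not taken from this test.] The "Over memstore limit=512.0 K" rejections come from HRegion.checkResources(), which blocks updates once a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the numbers below only show one way a 512 KB blocking limit could arise (128 KB flush size x 4):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values for illustration only.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    // Writes to a region are rejected with RegionTooBusyException above this size.
    System.out.println("Writes blocked above " + blockingLimit + " bytes per region");
  }
}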
2024-11-19T12:18:30,625 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-19T12:18:30,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:30,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:30,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:30,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:30,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:30,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:30,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119eeb00e3b39ff41c7bedffc7c5ffb4ca5_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018709236/Put/seqid=0 2024-11-19T12:18:30,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742039_1215 (size=12304) 2024-11-19T12:18:30,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:30,653 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119eeb00e3b39ff41c7bedffc7c5ffb4ca5_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119eeb00e3b39ff41c7bedffc7c5ffb4ca5_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:30,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/2e6ba870e95c4da0bfadf6f45ae2bbaa, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:30,655 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/2e6ba870e95c4da0bfadf6f45ae2bbaa is 175, key is test_row_0/A:col10/1732018709236/Put/seqid=0 2024-11-19T12:18:30,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742040_1216 (size=31105) 2024-11-19T12:18:31,059 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=256, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/2e6ba870e95c4da0bfadf6f45ae2bbaa 2024-11-19T12:18:31,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/80253edbae0944e4b1fb4b3a05282f25 is 50, key is test_row_0/B:col10/1732018709236/Put/seqid=0 2024-11-19T12:18:31,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742041_1217 (size=12151) 2024-11-19T12:18:31,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:31,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:31,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018771382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018771382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018771383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018771383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018771384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,474 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/80253edbae0944e4b1fb4b3a05282f25 2024-11-19T12:18:31,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/868fcdac2855413da833ccadfe873e97 is 50, key is test_row_0/C:col10/1732018709236/Put/seqid=0 2024-11-19T12:18:31,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018771487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018771487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018771487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018771488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018771489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742042_1218 (size=12151) 2024-11-19T12:18:31,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018771691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018771691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018771692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018771692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:31,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018771692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:31,896 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/868fcdac2855413da833ccadfe873e97 2024-11-19T12:18:31,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/2e6ba870e95c4da0bfadf6f45ae2bbaa as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2e6ba870e95c4da0bfadf6f45ae2bbaa 2024-11-19T12:18:31,906 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2e6ba870e95c4da0bfadf6f45ae2bbaa, entries=150, sequenceid=256, filesize=30.4 K 2024-11-19T12:18:31,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/80253edbae0944e4b1fb4b3a05282f25 as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/80253edbae0944e4b1fb4b3a05282f25 2024-11-19T12:18:31,916 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/80253edbae0944e4b1fb4b3a05282f25, entries=150, sequenceid=256, filesize=11.9 K 2024-11-19T12:18:31,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/868fcdac2855413da833ccadfe873e97 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/868fcdac2855413da833ccadfe873e97 2024-11-19T12:18:31,921 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/868fcdac2855413da833ccadfe873e97, entries=150, sequenceid=256, filesize=11.9 K 2024-11-19T12:18:31,922 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 9f6f39e9e25e92ed07f6efcc92916a02 in 1297ms, sequenceid=256, compaction requested=true 2024-11-19T12:18:31,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:31,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:31,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-19T12:18:31,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-19T12:18:31,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-19T12:18:31,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.7650 sec 2024-11-19T12:18:31,926 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 3.7700 sec 2024-11-19T12:18:31,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:31,995 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-19T12:18:31,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:31,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:31,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:31,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:31,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:31,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:32,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119a2a1075e484a420f89e7fe8fb88ed489_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018711995/Put/seqid=0 2024-11-19T12:18:32,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742043_1219 (size=12454) 2024-11-19T12:18:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018772010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018772011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,014 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:32,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018772012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018772013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,018 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119a2a1075e484a420f89e7fe8fb88ed489_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119a2a1075e484a420f89e7fe8fb88ed489_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:32,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018772015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,020 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/5cd45417f1de46298ea2f075c6bab460, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:32,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/5cd45417f1de46298ea2f075c6bab460 is 175, key is test_row_0/A:col10/1732018711995/Put/seqid=0 2024-11-19T12:18:32,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742044_1220 (size=31255) 2024-11-19T12:18:32,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018772115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018772115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018772115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018772118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018772120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-19T12:18:32,264 INFO [Thread-717 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-19T12:18:32,265 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:32,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-19T12:18:32,267 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:32,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-19T12:18:32,272 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:32,272 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:32,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018772321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018772321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018772322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018772324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018772324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-19T12:18:32,423 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:32,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-19T12:18:32,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:32,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:32,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:32,424 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
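The entries above show the master-dispatched flush (pid=58, a FlushRegionProcedure spawned by FlushTableProcedure pid=57) being rejected by the region server because the region reports "NOT flushing ... as already flushing"; the procedure framework keeps re-dispatching the callable until the in-progress flush finishes. A flush like the one driving this cycle can be requested from a client through the standard Admin API. The sketch below is illustrative only (class name and connection setup are assumptions, not taken from this test), matching the "Operation: FLUSH ... completed" line logged by HBaseAdmin$TableFuture earlier.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to run a FlushTableProcedure, which fans out
          // FlushRegionProcedure subprocedures to the owning region servers.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
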
2024-11-19T12:18:32,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:32,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:32,435 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/5cd45417f1de46298ea2f075c6bab460 2024-11-19T12:18:32,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/e772917ee2a44c949223eb35477a8319 is 50, key is test_row_0/B:col10/1732018711995/Put/seqid=0 2024-11-19T12:18:32,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742045_1221 (size=12301) 2024-11-19T12:18:32,577 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:32,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-19T12:18:32,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:32,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:32,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:32,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:32,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:32,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:32,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-19T12:18:32,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018772627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018772628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018772628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018772630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:32,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018772630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:32,730 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:32,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-19T12:18:32,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:32,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:32,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:32,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
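The repeated RegionTooBusyException entries in this stretch mean the region's memstore has crossed its blocking limit (512.0 K here), so HRegion.checkResources rejects new mutations until a flush frees memory. The HBase client normally retries these calls internally and may eventually surface the failure wrapped in a retries-exhausted exception; a hand-rolled retry would look roughly like the sketch below (putWithRetry, the attempt count, and the backoff are assumptions for illustration, not part of the test).

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class PutRetrySketch {
      // Hypothetical helper: retry a put a few times while the region reports it is too busy.
      static void putWithRetry(Table table, Put put) throws Exception {
        int attempts = 0;
        while (true) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            if (++attempts >= 5) {
              throw e;                       // give up after a few attempts
            }
            Thread.sleep(100L * attempts);   // simple linear backoff while the flush catches up
          }
        }
      }
    }
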
2024-11-19T12:18:32,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:32,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:32,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-19T12:18:32,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/e772917ee2a44c949223eb35477a8319 2024-11-19T12:18:32,883 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:32,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-19T12:18:32,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:32,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:32,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:32,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
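The 512.0 K figure in the "Over memstore limit" messages is the region's blocking memstore size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the test evidently runs with a far smaller flush size than the production default of 128 MB. A configuration sketch with illustrative values (the exact numbers used by this test are not shown in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values only: blocking limit = flush.size * block.multiplier.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // 128 KB (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // 4 * 128 KB = 512 KB
      }
    }
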
2024-11-19T12:18:32,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:32,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:32,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/8d695fc902b34fc3a7e42f24d57840cf is 50, key is test_row_0/C:col10/1732018711995/Put/seqid=0 2024-11-19T12:18:32,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742046_1222 (size=12301) 2024-11-19T12:18:32,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/8d695fc902b34fc3a7e42f24d57840cf 2024-11-19T12:18:32,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/5cd45417f1de46298ea2f075c6bab460 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/5cd45417f1de46298ea2f075c6bab460 2024-11-19T12:18:32,966 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/5cd45417f1de46298ea2f075c6bab460, entries=150, sequenceid=274, filesize=30.5 K 2024-11-19T12:18:32,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/e772917ee2a44c949223eb35477a8319 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/e772917ee2a44c949223eb35477a8319 2024-11-19T12:18:32,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/e772917ee2a44c949223eb35477a8319, entries=150, sequenceid=274, filesize=12.0 K 2024-11-19T12:18:32,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/8d695fc902b34fc3a7e42f24d57840cf as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8d695fc902b34fc3a7e42f24d57840cf 2024-11-19T12:18:32,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8d695fc902b34fc3a7e42f24d57840cf, entries=150, sequenceid=274, filesize=12.0 K 
2024-11-19T12:18:32,989 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 9f6f39e9e25e92ed07f6efcc92916a02 in 994ms, sequenceid=274, compaction requested=true 2024-11-19T12:18:32,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:32,990 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:32,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:32,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:32,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:32,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:32,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:32,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:32,990 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:32,992 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133726 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:32,992 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/A is initiating minor compaction (all files) 2024-11-19T12:18:32,992 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/A in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
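With the flush adding a fourth HFile to each store, ExploringCompactionPolicy selects all four files of store A (and, on the long-compactions thread, store B) for a minor compaction. Compactions can also be requested explicitly through the Admin API; the sketch below is a minimal illustration (class name, connection setup, and the choice of family are assumptions).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Queue a compaction of a single family...
          admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
          // ...or force a major compaction of every family in the table.
          admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
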
2024-11-19T12:18:32,992 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/62e58e979ed24d4aa5ec8856f6b2e0b8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/6526a4f5d35a4f1db28d41a1bc837313, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2e6ba870e95c4da0bfadf6f45ae2bbaa, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/5cd45417f1de46298ea2f075c6bab460] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=130.6 K 2024-11-19T12:18:32,993 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:32,993 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/62e58e979ed24d4aa5ec8856f6b2e0b8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/6526a4f5d35a4f1db28d41a1bc837313, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2e6ba870e95c4da0bfadf6f45ae2bbaa, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/5cd45417f1de46298ea2f075c6bab460] 2024-11-19T12:18:32,993 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62e58e979ed24d4aa5ec8856f6b2e0b8, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732018707970 2024-11-19T12:18:32,994 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6526a4f5d35a4f1db28d41a1bc837313, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732018708089 2024-11-19T12:18:32,994 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:32,994 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/B is initiating minor compaction (all files) 2024-11-19T12:18:32,994 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/B in 
TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:32,995 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/63accf0f99f34762aa6ce91d7043ff5e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/5008c44fb7a3403aa3363cf6af4f2676, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/80253edbae0944e4b1fb4b3a05282f25, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/e772917ee2a44c949223eb35477a8319] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=48.1 K 2024-11-19T12:18:32,995 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e6ba870e95c4da0bfadf6f45ae2bbaa, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732018709236 2024-11-19T12:18:32,995 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 63accf0f99f34762aa6ce91d7043ff5e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732018707970 2024-11-19T12:18:32,996 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 5008c44fb7a3403aa3363cf6af4f2676, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732018708089 2024-11-19T12:18:32,996 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5cd45417f1de46298ea2f075c6bab460, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732018711382 2024-11-19T12:18:32,996 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 80253edbae0944e4b1fb4b3a05282f25, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732018709236 2024-11-19T12:18:33,005 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting e772917ee2a44c949223eb35477a8319, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732018711382 2024-11-19T12:18:33,023 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#B#compaction#186 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:33,023 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/c947ad7a150a44a99711374278a23aa0 is 50, key is test_row_0/B:col10/1732018711995/Put/seqid=0 2024-11-19T12:18:33,029 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:33,041 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:33,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-19T12:18:33,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:33,041 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-19T12:18:33,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:33,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:33,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:33,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:33,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:33,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:33,049 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111998088ce9ad9a48c98ab3b2c8fe88850d_9f6f39e9e25e92ed07f6efcc92916a02 store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:33,052 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111998088ce9ad9a48c98ab3b2c8fe88850d_9f6f39e9e25e92ed07f6efcc92916a02, store=[table=TestAcidGuarantees family=A 
region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:33,052 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111998088ce9ad9a48c98ab3b2c8fe88850d_9f6f39e9e25e92ed07f6efcc92916a02 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:33,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742048_1224 (size=4469) 2024-11-19T12:18:33,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742047_1223 (size=12949) 2024-11-19T12:18:33,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411193a1e6532285f48a0822917e19847e225_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018712013/Put/seqid=0 2024-11-19T12:18:33,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:33,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:33,134 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/c947ad7a150a44a99711374278a23aa0 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/c947ad7a150a44a99711374278a23aa0 2024-11-19T12:18:33,142 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/B of 9f6f39e9e25e92ed07f6efcc92916a02 into c947ad7a150a44a99711374278a23aa0(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:33,142 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:33,143 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/B, priority=12, startTime=1732018712990; duration=0sec 2024-11-19T12:18:33,143 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:33,143 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:B 2024-11-19T12:18:33,143 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:33,144 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:33,145 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/C is initiating minor compaction (all files) 2024-11-19T12:18:33,145 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/C in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:33,145 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/03d4c042a43d4406942ef60517a61e0a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/38588620032b4ec1a659cabfdfc91715, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/868fcdac2855413da833ccadfe873e97, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8d695fc902b34fc3a7e42f24d57840cf] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=48.1 K 2024-11-19T12:18:33,146 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 03d4c042a43d4406942ef60517a61e0a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732018707970 2024-11-19T12:18:33,146 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 38588620032b4ec1a659cabfdfc91715, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732018708089 2024-11-19T12:18:33,147 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 868fcdac2855413da833ccadfe873e97, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=256, earliestPutTs=1732018709236 2024-11-19T12:18:33,147 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d695fc902b34fc3a7e42f24d57840cf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732018711382 2024-11-19T12:18:33,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018773154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018773154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018773154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018773155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018773157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742049_1225 (size=12454) 2024-11-19T12:18:33,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:33,176 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411193a1e6532285f48a0822917e19847e225_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193a1e6532285f48a0822917e19847e225_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:33,179 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#C#compaction#189 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:33,180 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/55889f6ff5ee4e5db6e19a460b09857e is 50, key is test_row_0/C:col10/1732018711995/Put/seqid=0 2024-11-19T12:18:33,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f10e124e3c174baca546f59a371e1840, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:33,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f10e124e3c174baca546f59a371e1840 is 175, key is test_row_0/A:col10/1732018712013/Put/seqid=0 2024-11-19T12:18:33,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742050_1226 (size=12949) 2024-11-19T12:18:33,215 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/55889f6ff5ee4e5db6e19a460b09857e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/55889f6ff5ee4e5db6e19a460b09857e 2024-11-19T12:18:33,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742051_1227 (size=31255) 2024-11-19T12:18:33,221 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f10e124e3c174baca546f59a371e1840 2024-11-19T12:18:33,222 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/C of 9f6f39e9e25e92ed07f6efcc92916a02 into 55889f6ff5ee4e5db6e19a460b09857e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:33,223 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:33,223 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/C, priority=12, startTime=1732018712990; duration=0sec 2024-11-19T12:18:33,223 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:33,223 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:C 2024-11-19T12:18:33,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/b05265634e964811af590bc264ab5d0e is 50, key is test_row_0/B:col10/1732018712013/Put/seqid=0 2024-11-19T12:18:33,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742052_1228 (size=12301) 2024-11-19T12:18:33,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018773261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018773261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018773262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018773262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018773262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-19T12:18:33,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018773465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018773465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018773465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018773466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018773467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,527 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#A#compaction#187 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:33,527 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/1d6ee066ea2d487894daf601c21173b4 is 175, key is test_row_0/A:col10/1732018711995/Put/seqid=0 2024-11-19T12:18:33,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742053_1229 (size=31903) 2024-11-19T12:18:33,644 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/b05265634e964811af590bc264ab5d0e 2024-11-19T12:18:33,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/053636572e3c46ada74856e030a47ddc is 50, key is test_row_0/C:col10/1732018712013/Put/seqid=0 2024-11-19T12:18:33,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742054_1230 (size=12301) 2024-11-19T12:18:33,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018773769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018773769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018773769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018773770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:33,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018773770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:33,939 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/1d6ee066ea2d487894daf601c21173b4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1d6ee066ea2d487894daf601c21173b4 2024-11-19T12:18:33,945 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/A of 9f6f39e9e25e92ed07f6efcc92916a02 into 1d6ee066ea2d487894daf601c21173b4(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:33,945 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:33,945 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/A, priority=12, startTime=1732018712990; duration=0sec 2024-11-19T12:18:33,946 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:33,946 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:A 2024-11-19T12:18:34,059 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/053636572e3c46ada74856e030a47ddc 2024-11-19T12:18:34,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f10e124e3c174baca546f59a371e1840 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f10e124e3c174baca546f59a371e1840 2024-11-19T12:18:34,068 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f10e124e3c174baca546f59a371e1840, entries=150, sequenceid=293, filesize=30.5 K 2024-11-19T12:18:34,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/b05265634e964811af590bc264ab5d0e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/b05265634e964811af590bc264ab5d0e 2024-11-19T12:18:34,074 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/b05265634e964811af590bc264ab5d0e, entries=150, sequenceid=293, filesize=12.0 K 2024-11-19T12:18:34,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/053636572e3c46ada74856e030a47ddc 
as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/053636572e3c46ada74856e030a47ddc 2024-11-19T12:18:34,082 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/053636572e3c46ada74856e030a47ddc, entries=150, sequenceid=293, filesize=12.0 K 2024-11-19T12:18:34,083 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 9f6f39e9e25e92ed07f6efcc92916a02 in 1042ms, sequenceid=293, compaction requested=false 2024-11-19T12:18:34,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:34,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-19T12:18:34,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-19T12:18:34,086 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-19T12:18:34,086 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8120 sec 2024-11-19T12:18:34,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.8210 sec 2024-11-19T12:18:34,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:34,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-19T12:18:34,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:34,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:34,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:34,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:34,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:34,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:34,285 
DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111956c6c7eb00a8441e9513c4d313b0e994_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018713152/Put/seqid=0 2024-11-19T12:18:34,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018774287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018774289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018774290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018774290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018774291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742055_1231 (size=17534) 2024-11-19T12:18:34,318 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:34,323 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111956c6c7eb00a8441e9513c4d313b0e994_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111956c6c7eb00a8441e9513c4d313b0e994_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:34,324 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f3b62001eaca4964a7e847d8d58a78ea, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:34,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f3b62001eaca4964a7e847d8d58a78ea is 175, key is test_row_0/A:col10/1732018713152/Put/seqid=0 2024-11-19T12:18:34,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742056_1232 (size=48639) 2024-11-19T12:18:34,332 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f3b62001eaca4964a7e847d8d58a78ea 2024-11-19T12:18:34,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/94af6b3c0e744bec922014d73b77b284 is 50, key is 
test_row_0/B:col10/1732018713152/Put/seqid=0 2024-11-19T12:18:34,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742057_1233 (size=12301) 2024-11-19T12:18:34,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-19T12:18:34,382 INFO [Thread-717 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-19T12:18:34,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:34,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-19T12:18:34,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-19T12:18:34,385 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:34,385 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:34,385 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:34,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018774391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018774392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018774395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018774396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018774396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-19T12:18:34,537 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:34,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-19T12:18:34,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:34,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,538 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:34,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:34,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:34,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018774594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018774595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018774600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018774600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018774601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-19T12:18:34,690 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:34,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-19T12:18:34,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:34,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:34,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:34,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:34,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/94af6b3c0e744bec922014d73b77b284 2024-11-19T12:18:34,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/387208b07aaa4db89ffb284e31b08a6c is 50, key is test_row_0/C:col10/1732018713152/Put/seqid=0 2024-11-19T12:18:34,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742058_1234 (size=12301) 2024-11-19T12:18:34,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/387208b07aaa4db89ffb284e31b08a6c 2024-11-19T12:18:34,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/f3b62001eaca4964a7e847d8d58a78ea as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f3b62001eaca4964a7e847d8d58a78ea 2024-11-19T12:18:34,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f3b62001eaca4964a7e847d8d58a78ea, entries=250, sequenceid=315, filesize=47.5 K 2024-11-19T12:18:34,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/94af6b3c0e744bec922014d73b77b284 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/94af6b3c0e744bec922014d73b77b284 2024-11-19T12:18:34,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/94af6b3c0e744bec922014d73b77b284, entries=150, sequenceid=315, filesize=12.0 K 2024-11-19T12:18:34,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/387208b07aaa4db89ffb284e31b08a6c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/387208b07aaa4db89ffb284e31b08a6c 2024-11-19T12:18:34,844 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:34,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-19T12:18:34,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:34,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:34,852 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/387208b07aaa4db89ffb284e31b08a6c, entries=150, sequenceid=315, filesize=12.0 K 2024-11-19T12:18:34,853 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=107.34 KB/109920 for 9f6f39e9e25e92ed07f6efcc92916a02 in 578ms, sequenceid=315, compaction requested=true 2024-11-19T12:18:34,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:34,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:34,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:34,854 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:34,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:34,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:34,854 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:34,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:34,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:34,856 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:34,856 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/B is initiating minor compaction (all files) 
2024-11-19T12:18:34,856 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/B in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,856 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/c947ad7a150a44a99711374278a23aa0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/b05265634e964811af590bc264ab5d0e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/94af6b3c0e744bec922014d73b77b284] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=36.7 K 2024-11-19T12:18:34,856 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:34,856 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/A is initiating minor compaction (all files) 2024-11-19T12:18:34,856 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/A in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,856 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1d6ee066ea2d487894daf601c21173b4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f10e124e3c174baca546f59a371e1840, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f3b62001eaca4964a7e847d8d58a78ea] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=109.2 K 2024-11-19T12:18:34,856 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,856 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1d6ee066ea2d487894daf601c21173b4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f10e124e3c174baca546f59a371e1840, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f3b62001eaca4964a7e847d8d58a78ea] 2024-11-19T12:18:34,857 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c947ad7a150a44a99711374278a23aa0, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732018711382 2024-11-19T12:18:34,857 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d6ee066ea2d487894daf601c21173b4, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732018711382 2024-11-19T12:18:34,858 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b05265634e964811af590bc264ab5d0e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732018712007 2024-11-19T12:18:34,858 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting f10e124e3c174baca546f59a371e1840, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732018712007 2024-11-19T12:18:34,858 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 94af6b3c0e744bec922014d73b77b284, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732018713152 2024-11-19T12:18:34,859 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3b62001eaca4964a7e847d8d58a78ea, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732018713151 2024-11-19T12:18:34,868 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#B#compaction#195 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:34,868 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:34,869 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/adeffb56f7f34d38b4499183fb18871d is 50, key is test_row_0/B:col10/1732018713152/Put/seqid=0 2024-11-19T12:18:34,871 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111980066688702845f5be82a9b105b9b25d_9f6f39e9e25e92ed07f6efcc92916a02 store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:34,874 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111980066688702845f5be82a9b105b9b25d_9f6f39e9e25e92ed07f6efcc92916a02, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:34,874 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111980066688702845f5be82a9b105b9b25d_9f6f39e9e25e92ed07f6efcc92916a02 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:34,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742059_1235 (size=13051) 2024-11-19T12:18:34,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742060_1236 (size=4469) 2024-11-19T12:18:34,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:34,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-19T12:18:34,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:34,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:34,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:34,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:34,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:34,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:34,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119a04c9bd25cb543209449279342d5bf0e_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018714905/Put/seqid=0 2024-11-19T12:18:34,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018774917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018774917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018774918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018774919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018774920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:34,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742061_1237 (size=20074) 2024-11-19T12:18:34,939 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:34,946 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119a04c9bd25cb543209449279342d5bf0e_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119a04c9bd25cb543209449279342d5bf0e_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:34,947 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/db90bf1e47b8452d8afc768d01801c1a, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:34,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/db90bf1e47b8452d8afc768d01801c1a is 175, key is test_row_0/A:col10/1732018714905/Put/seqid=0 2024-11-19T12:18:34,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742062_1238 (size=57333) 2024-11-19T12:18:34,970 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=337, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/db90bf1e47b8452d8afc768d01801c1a 2024-11-19T12:18:34,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/c6cc3e6086044c9ea0d48d2e556b4ec5 is 50, key is 
test_row_0/B:col10/1732018714905/Put/seqid=0 2024-11-19T12:18:34,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742063_1239 (size=12301) 2024-11-19T12:18:34,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-19T12:18:34,997 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:34,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-19T12:18:34,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:34,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:34,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:34,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:34,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:35,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018775023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018775024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018775024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018775024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018775024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,150 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:35,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-19T12:18:35,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:35,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:35,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:35,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:35,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:35,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:35,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018775228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018775229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018775229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018775229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018775229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,296 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#A#compaction#196 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:35,297 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/43a55ad238f1480dba92a9026e692b4f is 175, key is test_row_0/A:col10/1732018713152/Put/seqid=0 2024-11-19T12:18:35,298 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/adeffb56f7f34d38b4499183fb18871d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/adeffb56f7f34d38b4499183fb18871d 2024-11-19T12:18:35,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742064_1240 (size=32005) 2024-11-19T12:18:35,303 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/B of 9f6f39e9e25e92ed07f6efcc92916a02 into adeffb56f7f34d38b4499183fb18871d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:35,303 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:35,303 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:35,303 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/B, priority=13, startTime=1732018714854; duration=0sec 2024-11-19T12:18:35,303 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:35,303 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:B 2024-11-19T12:18:35,303 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:35,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-19T12:18:35,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:35,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
as already flushing 2024-11-19T12:18:35,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:35,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:35,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:35,305 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:35,305 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 9f6f39e9e25e92ed07f6efcc92916a02/C is initiating minor compaction (all files) 2024-11-19T12:18:35,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:35,305 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f6f39e9e25e92ed07f6efcc92916a02/C in TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:35,305 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/55889f6ff5ee4e5db6e19a460b09857e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/053636572e3c46ada74856e030a47ddc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/387208b07aaa4db89ffb284e31b08a6c] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp, totalSize=36.7 K 2024-11-19T12:18:35,306 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 55889f6ff5ee4e5db6e19a460b09857e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732018711382 2024-11-19T12:18:35,306 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 053636572e3c46ada74856e030a47ddc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732018712007 2024-11-19T12:18:35,307 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 387208b07aaa4db89ffb284e31b08a6c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732018713152 2024-11-19T12:18:35,314 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f6f39e9e25e92ed07f6efcc92916a02#C#compaction#199 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:35,315 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/b63ed5faabee4bb7bf73ab494d95277e is 50, key is test_row_0/C:col10/1732018713152/Put/seqid=0 2024-11-19T12:18:35,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742065_1241 (size=13051) 2024-11-19T12:18:35,392 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/c6cc3e6086044c9ea0d48d2e556b4ec5 2024-11-19T12:18:35,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/dccd61177f674201af58ce7b4556db26 is 50, key is test_row_0/C:col10/1732018714905/Put/seqid=0 2024-11-19T12:18:35,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742066_1242 (size=12301) 2024-11-19T12:18:35,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/dccd61177f674201af58ce7b4556db26 2024-11-19T12:18:35,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/db90bf1e47b8452d8afc768d01801c1a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/db90bf1e47b8452d8afc768d01801c1a 2024-11-19T12:18:35,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/db90bf1e47b8452d8afc768d01801c1a, entries=300, sequenceid=337, filesize=56.0 K 2024-11-19T12:18:35,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/c6cc3e6086044c9ea0d48d2e556b4ec5 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/c6cc3e6086044c9ea0d48d2e556b4ec5 2024-11-19T12:18:35,416 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/c6cc3e6086044c9ea0d48d2e556b4ec5, entries=150, sequenceid=337, filesize=12.0 K 2024-11-19T12:18:35,417 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/dccd61177f674201af58ce7b4556db26 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/dccd61177f674201af58ce7b4556db26 2024-11-19T12:18:35,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/dccd61177f674201af58ce7b4556db26, entries=150, sequenceid=337, filesize=12.0 K 2024-11-19T12:18:35,421 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 9f6f39e9e25e92ed07f6efcc92916a02 in 516ms, sequenceid=337, compaction requested=false 2024-11-19T12:18:35,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:35,456 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:35,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-19T12:18:35,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:35,457 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-19T12:18:35,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:35,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:35,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:35,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:35,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:35,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:35,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411192cd2cf776d124b6ea0822bd5f51e08bf_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018714918/Put/seqid=0 2024-11-19T12:18:35,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742067_1243 (size=12454) 2024-11-19T12:18:35,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-19T12:18:35,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:35,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. as already flushing 2024-11-19T12:18:35,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018775547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018775547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018775550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018775550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018775551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018775652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018775652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018775655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018775655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018775655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,708 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/43a55ad238f1480dba92a9026e692b4f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/43a55ad238f1480dba92a9026e692b4f 2024-11-19T12:18:35,718 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/A of 9f6f39e9e25e92ed07f6efcc92916a02 into 43a55ad238f1480dba92a9026e692b4f(size=31.3 K), total size for store is 87.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:35,718 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:35,719 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/A, priority=13, startTime=1732018714854; duration=0sec 2024-11-19T12:18:35,719 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:35,719 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:A 2024-11-19T12:18:35,724 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/b63ed5faabee4bb7bf73ab494d95277e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/b63ed5faabee4bb7bf73ab494d95277e 2024-11-19T12:18:35,730 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f6f39e9e25e92ed07f6efcc92916a02/C of 9f6f39e9e25e92ed07f6efcc92916a02 into b63ed5faabee4bb7bf73ab494d95277e(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:35,730 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:35,730 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02., storeName=9f6f39e9e25e92ed07f6efcc92916a02/C, priority=13, startTime=1732018714854; duration=0sec 2024-11-19T12:18:35,730 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:35,731 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:C 2024-11-19T12:18:35,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018775854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018775855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018775857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018775857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018775857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:35,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:35,874 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411192cd2cf776d124b6ea0822bd5f51e08bf_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411192cd2cf776d124b6ea0822bd5f51e08bf_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:35,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/b1db3cfe71b34ffcb894b5e1ca8010f7, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:35,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/b1db3cfe71b34ffcb894b5e1ca8010f7 is 175, key is test_row_0/A:col10/1732018714918/Put/seqid=0 2024-11-19T12:18:35,880 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742068_1244 (size=31255) 2024-11-19T12:18:36,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:36,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018776158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:36,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:36,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018776158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:36,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:36,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018776160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:36,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:36,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018776162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:36,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:36,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018776162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:36,281 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=352, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/b1db3cfe71b34ffcb894b5e1ca8010f7 2024-11-19T12:18:36,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/cf58c4683d954fe6ab79605d79a6d191 is 50, key is test_row_0/B:col10/1732018714918/Put/seqid=0 2024-11-19T12:18:36,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742069_1245 (size=12301) 2024-11-19T12:18:36,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-19T12:18:36,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:36,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35632 deadline: 1732018776663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:36,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:36,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35656 deadline: 1732018776664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:36,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:36,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35696 deadline: 1732018776666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:36,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:36,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35646 deadline: 1732018776668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:36,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:36,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35664 deadline: 1732018776668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:36,694 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/cf58c4683d954fe6ab79605d79a6d191 2024-11-19T12:18:36,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/216b13e892b747b29fae4985d80ba53a is 50, key is test_row_0/C:col10/1732018714918/Put/seqid=0 2024-11-19T12:18:36,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742070_1246 (size=12301) 2024-11-19T12:18:37,108 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/216b13e892b747b29fae4985d80ba53a 2024-11-19T12:18:37,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/b1db3cfe71b34ffcb894b5e1ca8010f7 as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/b1db3cfe71b34ffcb894b5e1ca8010f7 2024-11-19T12:18:37,120 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/b1db3cfe71b34ffcb894b5e1ca8010f7, entries=150, sequenceid=352, filesize=30.5 K 2024-11-19T12:18:37,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/cf58c4683d954fe6ab79605d79a6d191 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/cf58c4683d954fe6ab79605d79a6d191 2024-11-19T12:18:37,126 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/cf58c4683d954fe6ab79605d79a6d191, entries=150, sequenceid=352, filesize=12.0 K 2024-11-19T12:18:37,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/216b13e892b747b29fae4985d80ba53a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/216b13e892b747b29fae4985d80ba53a 2024-11-19T12:18:37,133 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/216b13e892b747b29fae4985d80ba53a, entries=150, sequenceid=352, filesize=12.0 K 2024-11-19T12:18:37,133 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 9f6f39e9e25e92ed07f6efcc92916a02 in 1676ms, sequenceid=352, compaction requested=true 2024-11-19T12:18:37,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:37,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:37,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-19T12:18:37,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-19T12:18:37,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-19T12:18:37,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7500 sec 2024-11-19T12:18:37,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 2.7540 sec 2024-11-19T12:18:37,614 DEBUG [Thread-720 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1aed43b4 to 127.0.0.1:64186 2024-11-19T12:18:37,614 DEBUG [Thread-722 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cab9ba4 to 127.0.0.1:64186 2024-11-19T12:18:37,614 DEBUG [Thread-722 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:37,614 DEBUG [Thread-720 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:37,614 DEBUG [Thread-718 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c368568 to 127.0.0.1:64186 2024-11-19T12:18:37,614 DEBUG [Thread-718 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:37,615 DEBUG [Thread-724 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x074eb796 to 127.0.0.1:64186 2024-11-19T12:18:37,615 DEBUG [Thread-724 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:37,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:37,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-19T12:18:37,669 DEBUG [Thread-707 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b8114b4 to 127.0.0.1:64186 2024-11-19T12:18:37,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:37,669 DEBUG [Thread-707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:37,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:37,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:37,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:37,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:37,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:37,676 DEBUG [Thread-715 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0de2fcf6 to 127.0.0.1:64186 2024-11-19T12:18:37,676 DEBUG [Thread-715 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:37,677 DEBUG 
[Thread-711 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77f4d875 to 127.0.0.1:64186 2024-11-19T12:18:37,677 DEBUG [Thread-711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:37,678 DEBUG [Thread-713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d89b50a to 127.0.0.1:64186 2024-11-19T12:18:37,678 DEBUG [Thread-713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:37,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411190a228acbded649b9934c7dfcb7f7ccc3_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018715549/Put/seqid=0 2024-11-19T12:18:37,679 DEBUG [Thread-709 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6b5c4058 to 127.0.0.1:64186 2024-11-19T12:18:37,679 DEBUG [Thread-709 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:37,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742071_1247 (size=12454) 2024-11-19T12:18:38,084 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:38,087 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411190a228acbded649b9934c7dfcb7f7ccc3_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411190a228acbded649b9934c7dfcb7f7ccc3_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:38,088 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/5fb9915165aa4ec58fa739dff5ff2cb8, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:38,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/5fb9915165aa4ec58fa739dff5ff2cb8 is 175, key is test_row_0/A:col10/1732018715549/Put/seqid=0 2024-11-19T12:18:38,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742072_1248 (size=31255) 2024-11-19T12:18:38,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-19T12:18:38,491 INFO [Thread-717 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6242 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6081 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2677 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8031 rows 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2696 2024-11-19T12:18:38,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8088 rows 2024-11-19T12:18:38,491 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-19T12:18:38,491 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7f48093f to 127.0.0.1:64186 2024-11-19T12:18:38,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:18:38,493 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=377, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/5fb9915165aa4ec58fa739dff5ff2cb8 2024-11-19T12:18:38,498 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-19T12:18:38,498 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-19T12:18:38,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:38,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-19T12:18:38,502 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018718501"}]},"ts":"1732018718501"} 2024-11-19T12:18:38,503 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-19T12:18:38,503 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/73a6448ce01b48aab72b6fbbc2ec192e is 50, key is test_row_0/B:col10/1732018715549/Put/seqid=0 2024-11-19T12:18:38,504 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to 
state=DISABLING 2024-11-19T12:18:38,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-19T12:18:38,506 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9f6f39e9e25e92ed07f6efcc92916a02, UNASSIGN}] 2024-11-19T12:18:38,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742073_1249 (size=12301) 2024-11-19T12:18:38,507 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9f6f39e9e25e92ed07f6efcc92916a02, UNASSIGN 2024-11-19T12:18:38,507 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=9f6f39e9e25e92ed07f6efcc92916a02, regionState=CLOSING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:18:38,508 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-19T12:18:38,508 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; CloseRegionProcedure 9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:18:38,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-19T12:18:38,659 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:38,660 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(124): Close 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:38,660 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-19T12:18:38,660 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1681): Closing 9f6f39e9e25e92ed07f6efcc92916a02, disabling compactions & flushes 2024-11-19T12:18:38,660 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:38,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-19T12:18:38,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/73a6448ce01b48aab72b6fbbc2ec192e 2024-11-19T12:18:38,915 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/54183ef18a554610be5123ef35d6388c is 50, key is test_row_0/C:col10/1732018715549/Put/seqid=0 2024-11-19T12:18:38,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742074_1250 (size=12301) 2024-11-19T12:18:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-19T12:18:39,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/54183ef18a554610be5123ef35d6388c 2024-11-19T12:18:39,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/5fb9915165aa4ec58fa739dff5ff2cb8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/5fb9915165aa4ec58fa739dff5ff2cb8 2024-11-19T12:18:39,342 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/5fb9915165aa4ec58fa739dff5ff2cb8, entries=150, sequenceid=377, filesize=30.5 K 2024-11-19T12:18:39,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/73a6448ce01b48aab72b6fbbc2ec192e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/73a6448ce01b48aab72b6fbbc2ec192e 2024-11-19T12:18:39,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/73a6448ce01b48aab72b6fbbc2ec192e, entries=150, sequenceid=377, filesize=12.0 K 2024-11-19T12:18:39,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/54183ef18a554610be5123ef35d6388c as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/54183ef18a554610be5123ef35d6388c 2024-11-19T12:18:39,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/54183ef18a554610be5123ef35d6388c, entries=150, sequenceid=377, filesize=12.0 K 2024-11-19T12:18:39,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=26.84 KB/27480 for 9f6f39e9e25e92ed07f6efcc92916a02 in 1689ms, sequenceid=377, compaction requested=true 2024-11-19T12:18:39,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:39,358 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:39,358 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:39,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:39,358 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. after waiting 0 ms 2024-11-19T12:18:39,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:39,358 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 2024-11-19T12:18:39,358 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
because compaction request was cancelled 2024-11-19T12:18:39,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:39,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:39,358 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:A 2024-11-19T12:18:39,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f6f39e9e25e92ed07f6efcc92916a02:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:39,358 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. because compaction request was cancelled 2024-11-19T12:18:39,358 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. because compaction request was cancelled 2024-11-19T12:18:39,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:39,358 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(2837): Flushing 9f6f39e9e25e92ed07f6efcc92916a02 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-19T12:18:39,358 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:C 2024-11-19T12:18:39,358 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f6f39e9e25e92ed07f6efcc92916a02:B 2024-11-19T12:18:39,359 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=A 2024-11-19T12:18:39,359 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:39,359 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=B 2024-11-19T12:18:39,359 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:39,359 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9f6f39e9e25e92ed07f6efcc92916a02, store=C 2024-11-19T12:18:39,359 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:39,374 DEBUG 
[RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411193356f5af3fc84368bb7e01e873f26564_9f6f39e9e25e92ed07f6efcc92916a02 is 50, key is test_row_0/A:col10/1732018717678/Put/seqid=0 2024-11-19T12:18:39,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742075_1251 (size=12454) 2024-11-19T12:18:39,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-19T12:18:39,783 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:39,787 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411193356f5af3fc84368bb7e01e873f26564_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193356f5af3fc84368bb7e01e873f26564_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:39,789 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/6da812f08a3d4363a3b3bd7419078f61, store: [table=TestAcidGuarantees family=A region=9f6f39e9e25e92ed07f6efcc92916a02] 2024-11-19T12:18:39,789 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/6da812f08a3d4363a3b3bd7419078f61 is 175, key is test_row_0/A:col10/1732018717678/Put/seqid=0 2024-11-19T12:18:39,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742076_1252 (size=31255) 2024-11-19T12:18:39,794 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=384, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/6da812f08a3d4363a3b3bd7419078f61 2024-11-19T12:18:39,801 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/8a78d39633854fc79d7ab83a14f7cc07 is 50, key is test_row_0/B:col10/1732018717678/Put/seqid=0 
2024-11-19T12:18:39,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742077_1253 (size=12301) 2024-11-19T12:18:39,927 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T12:18:40,205 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/8a78d39633854fc79d7ab83a14f7cc07 2024-11-19T12:18:40,212 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/a68679d3fc00426ba9b7e1b049b262a0 is 50, key is test_row_0/C:col10/1732018717678/Put/seqid=0 2024-11-19T12:18:40,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742078_1254 (size=12301) 2024-11-19T12:18:40,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-19T12:18:40,616 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/a68679d3fc00426ba9b7e1b049b262a0 2024-11-19T12:18:40,620 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/A/6da812f08a3d4363a3b3bd7419078f61 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/6da812f08a3d4363a3b3bd7419078f61 2024-11-19T12:18:40,623 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/6da812f08a3d4363a3b3bd7419078f61, entries=150, sequenceid=384, filesize=30.5 K 2024-11-19T12:18:40,624 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/B/8a78d39633854fc79d7ab83a14f7cc07 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/8a78d39633854fc79d7ab83a14f7cc07 2024-11-19T12:18:40,627 INFO 
[RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/8a78d39633854fc79d7ab83a14f7cc07, entries=150, sequenceid=384, filesize=12.0 K 2024-11-19T12:18:40,628 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/.tmp/C/a68679d3fc00426ba9b7e1b049b262a0 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/a68679d3fc00426ba9b7e1b049b262a0 2024-11-19T12:18:40,631 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/a68679d3fc00426ba9b7e1b049b262a0, entries=150, sequenceid=384, filesize=12.0 K 2024-11-19T12:18:40,632 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 9f6f39e9e25e92ed07f6efcc92916a02 in 1274ms, sequenceid=384, compaction requested=true 2024-11-19T12:18:40,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/d4c7d1f568f74f0083f532a0f112c759, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/e8cfbc40ef984656824b6baa41cfe3ee, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/803c5e8c6cf74142bd68d820e77c6368, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1721ad209483409c874f01180c631e2d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/cd31b51f19b9494dad88c9b1fdc3ac1b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/264abe0e0dc84c4f910f0aec9ceed26d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/ae87b84aac4047638773a990d4a38e1f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/b64339a00d7547ef9788479741536afc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f677d0e937c44833b087a820951f8c06, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f2118d10415c4ad284dab1c699a20f8e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/03d6decdf837410892cca396234647d5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/7990966dc2ff46ca80bbef0d60524544, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/47adb9b448524583ad1b21c473fc23b4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/bd3f5ea2d1c441e59184e0d942f11ce4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/62e58e979ed24d4aa5ec8856f6b2e0b8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2685c106b5864f5cac1245785c853bac, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/6526a4f5d35a4f1db28d41a1bc837313, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2e6ba870e95c4da0bfadf6f45ae2bbaa, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1d6ee066ea2d487894daf601c21173b4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/5cd45417f1de46298ea2f075c6bab460, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f10e124e3c174baca546f59a371e1840, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f3b62001eaca4964a7e847d8d58a78ea] to archive 2024-11-19T12:18:40,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T12:18:40,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/d4c7d1f568f74f0083f532a0f112c759 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/d4c7d1f568f74f0083f532a0f112c759 2024-11-19T12:18:40,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/e8cfbc40ef984656824b6baa41cfe3ee to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/e8cfbc40ef984656824b6baa41cfe3ee 2024-11-19T12:18:40,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/803c5e8c6cf74142bd68d820e77c6368 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/803c5e8c6cf74142bd68d820e77c6368 2024-11-19T12:18:40,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1721ad209483409c874f01180c631e2d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1721ad209483409c874f01180c631e2d 2024-11-19T12:18:40,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/cd31b51f19b9494dad88c9b1fdc3ac1b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/cd31b51f19b9494dad88c9b1fdc3ac1b 2024-11-19T12:18:40,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/264abe0e0dc84c4f910f0aec9ceed26d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/264abe0e0dc84c4f910f0aec9ceed26d 2024-11-19T12:18:40,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/ae87b84aac4047638773a990d4a38e1f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/ae87b84aac4047638773a990d4a38e1f 2024-11-19T12:18:40,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/b64339a00d7547ef9788479741536afc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/b64339a00d7547ef9788479741536afc 2024-11-19T12:18:40,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f677d0e937c44833b087a820951f8c06 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f677d0e937c44833b087a820951f8c06 2024-11-19T12:18:40,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f2118d10415c4ad284dab1c699a20f8e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f2118d10415c4ad284dab1c699a20f8e 2024-11-19T12:18:40,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/03d6decdf837410892cca396234647d5 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/03d6decdf837410892cca396234647d5 2024-11-19T12:18:40,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/7990966dc2ff46ca80bbef0d60524544 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/7990966dc2ff46ca80bbef0d60524544 2024-11-19T12:18:40,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/47adb9b448524583ad1b21c473fc23b4 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/47adb9b448524583ad1b21c473fc23b4 2024-11-19T12:18:40,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/bd3f5ea2d1c441e59184e0d942f11ce4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/bd3f5ea2d1c441e59184e0d942f11ce4 2024-11-19T12:18:40,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/62e58e979ed24d4aa5ec8856f6b2e0b8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/62e58e979ed24d4aa5ec8856f6b2e0b8 2024-11-19T12:18:40,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2685c106b5864f5cac1245785c853bac to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2685c106b5864f5cac1245785c853bac 2024-11-19T12:18:40,650 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/6526a4f5d35a4f1db28d41a1bc837313 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/6526a4f5d35a4f1db28d41a1bc837313 2024-11-19T12:18:40,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2e6ba870e95c4da0bfadf6f45ae2bbaa to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/2e6ba870e95c4da0bfadf6f45ae2bbaa 2024-11-19T12:18:40,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1d6ee066ea2d487894daf601c21173b4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/1d6ee066ea2d487894daf601c21173b4 2024-11-19T12:18:40,653 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/5cd45417f1de46298ea2f075c6bab460 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/5cd45417f1de46298ea2f075c6bab460 2024-11-19T12:18:40,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f10e124e3c174baca546f59a371e1840 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f10e124e3c174baca546f59a371e1840 2024-11-19T12:18:40,655 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f3b62001eaca4964a7e847d8d58a78ea to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/f3b62001eaca4964a7e847d8d58a78ea 2024-11-19T12:18:40,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/25721a8f6bec48309b6c89e5cde104e9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/1b91e6861c5f4402846962898305ffab, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d312a72c1c3e4daa847e23e7d837cb0d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/52bec6e78e7e4ab7a3e9cbbe442b1c69, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/74b69e0cb5c844ec9ec1d2b3eaa4e2d6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/f2b1c623b352447d91bfa9010cd44b63, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/ca6f80c448a74c37805d47f3c584d55c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d45f9278318d4fd3b5a561f35fbda132, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/e03716afc772476ea60406d9df0b1a7e, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/355ad2c7378945808eb0626ab883f566, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/fd68cd3dca5140f0964ddb778d98d500, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/76889f7515a549358a014314b0a185d1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d8c77dd9ce054861b03a0c0e4630c911, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/17d2cf6c7f4c4005b20b6fa37e88385f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/63accf0f99f34762aa6ce91d7043ff5e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/01f584b09c6b41d293b450fc18b98ec4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/5008c44fb7a3403aa3363cf6af4f2676, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/80253edbae0944e4b1fb4b3a05282f25, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/c947ad7a150a44a99711374278a23aa0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/e772917ee2a44c949223eb35477a8319, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/b05265634e964811af590bc264ab5d0e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/94af6b3c0e744bec922014d73b77b284] to archive 2024-11-19T12:18:40,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T12:18:40,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/25721a8f6bec48309b6c89e5cde104e9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/25721a8f6bec48309b6c89e5cde104e9 2024-11-19T12:18:40,659 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/1b91e6861c5f4402846962898305ffab to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/1b91e6861c5f4402846962898305ffab 2024-11-19T12:18:40,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d312a72c1c3e4daa847e23e7d837cb0d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d312a72c1c3e4daa847e23e7d837cb0d 2024-11-19T12:18:40,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/52bec6e78e7e4ab7a3e9cbbe442b1c69 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/52bec6e78e7e4ab7a3e9cbbe442b1c69 2024-11-19T12:18:40,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/74b69e0cb5c844ec9ec1d2b3eaa4e2d6 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/74b69e0cb5c844ec9ec1d2b3eaa4e2d6 2024-11-19T12:18:40,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/f2b1c623b352447d91bfa9010cd44b63 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/f2b1c623b352447d91bfa9010cd44b63 2024-11-19T12:18:40,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/ca6f80c448a74c37805d47f3c584d55c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/ca6f80c448a74c37805d47f3c584d55c 2024-11-19T12:18:40,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d45f9278318d4fd3b5a561f35fbda132 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d45f9278318d4fd3b5a561f35fbda132 2024-11-19T12:18:40,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/e03716afc772476ea60406d9df0b1a7e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/e03716afc772476ea60406d9df0b1a7e 2024-11-19T12:18:40,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/355ad2c7378945808eb0626ab883f566 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/355ad2c7378945808eb0626ab883f566 2024-11-19T12:18:40,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/fd68cd3dca5140f0964ddb778d98d500 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/fd68cd3dca5140f0964ddb778d98d500 2024-11-19T12:18:40,671 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/76889f7515a549358a014314b0a185d1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/76889f7515a549358a014314b0a185d1 2024-11-19T12:18:40,672 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d8c77dd9ce054861b03a0c0e4630c911 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/d8c77dd9ce054861b03a0c0e4630c911 2024-11-19T12:18:40,673 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/17d2cf6c7f4c4005b20b6fa37e88385f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/17d2cf6c7f4c4005b20b6fa37e88385f 2024-11-19T12:18:40,674 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/63accf0f99f34762aa6ce91d7043ff5e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/63accf0f99f34762aa6ce91d7043ff5e 2024-11-19T12:18:40,675 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/01f584b09c6b41d293b450fc18b98ec4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/01f584b09c6b41d293b450fc18b98ec4 2024-11-19T12:18:40,676 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/5008c44fb7a3403aa3363cf6af4f2676 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/5008c44fb7a3403aa3363cf6af4f2676 2024-11-19T12:18:40,678 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/80253edbae0944e4b1fb4b3a05282f25 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/80253edbae0944e4b1fb4b3a05282f25 2024-11-19T12:18:40,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/c947ad7a150a44a99711374278a23aa0 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/c947ad7a150a44a99711374278a23aa0 2024-11-19T12:18:40,680 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/e772917ee2a44c949223eb35477a8319 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/e772917ee2a44c949223eb35477a8319 2024-11-19T12:18:40,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/b05265634e964811af590bc264ab5d0e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/b05265634e964811af590bc264ab5d0e 2024-11-19T12:18:40,682 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/94af6b3c0e744bec922014d73b77b284 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/94af6b3c0e744bec922014d73b77b284 2024-11-19T12:18:40,683 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/98ccab8ed63b4596ab31578bc505d2eb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/4f097dacc6de42139c832c276d842e66, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/eea27a4b1fef46ad805105de5dcfad2f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/09b04491f8614c16bf14a95b4f160336, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/0a590c569f424a449132749205963c45, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/e39a8307a7104e71b5e3d937c9c20d6b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/7698124922034a8fbbbc3d756a42a3bf, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/54b90538ebf9463692a485705a771f0d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/1ab41380d77641bcb9541fbb05caf105, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8014a1cc5f3d44aca153d14f2082b6a7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/a6e11fd7a0af43428dc352b9c593ff89, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/d2306a828544432a93dc23449d6cc5cb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/57c75dbdc49f44dc8cf70b84d9b2dcb9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2b23f88d5082495e9fb81511722867e5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/03d4c042a43d4406942ef60517a61e0a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2ab31cd85d134acabd70d813f23a3b26, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/38588620032b4ec1a659cabfdfc91715, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/868fcdac2855413da833ccadfe873e97, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/55889f6ff5ee4e5db6e19a460b09857e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8d695fc902b34fc3a7e42f24d57840cf, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/053636572e3c46ada74856e030a47ddc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/387208b07aaa4db89ffb284e31b08a6c] to archive 2024-11-19T12:18:40,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T12:18:40,685 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/98ccab8ed63b4596ab31578bc505d2eb to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/98ccab8ed63b4596ab31578bc505d2eb 2024-11-19T12:18:40,686 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/4f097dacc6de42139c832c276d842e66 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/4f097dacc6de42139c832c276d842e66 2024-11-19T12:18:40,687 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/eea27a4b1fef46ad805105de5dcfad2f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/eea27a4b1fef46ad805105de5dcfad2f 2024-11-19T12:18:40,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/09b04491f8614c16bf14a95b4f160336 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/09b04491f8614c16bf14a95b4f160336 2024-11-19T12:18:40,689 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/0a590c569f424a449132749205963c45 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/0a590c569f424a449132749205963c45 2024-11-19T12:18:40,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/e39a8307a7104e71b5e3d937c9c20d6b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/e39a8307a7104e71b5e3d937c9c20d6b 2024-11-19T12:18:40,691 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/7698124922034a8fbbbc3d756a42a3bf to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/7698124922034a8fbbbc3d756a42a3bf 2024-11-19T12:18:40,692 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/54b90538ebf9463692a485705a771f0d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/54b90538ebf9463692a485705a771f0d 2024-11-19T12:18:40,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/1ab41380d77641bcb9541fbb05caf105 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/1ab41380d77641bcb9541fbb05caf105 2024-11-19T12:18:40,694 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8014a1cc5f3d44aca153d14f2082b6a7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8014a1cc5f3d44aca153d14f2082b6a7 2024-11-19T12:18:40,695 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/a6e11fd7a0af43428dc352b9c593ff89 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/a6e11fd7a0af43428dc352b9c593ff89 2024-11-19T12:18:40,696 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/d2306a828544432a93dc23449d6cc5cb to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/d2306a828544432a93dc23449d6cc5cb 2024-11-19T12:18:40,697 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/57c75dbdc49f44dc8cf70b84d9b2dcb9 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/57c75dbdc49f44dc8cf70b84d9b2dcb9 2024-11-19T12:18:40,698 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2b23f88d5082495e9fb81511722867e5 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2b23f88d5082495e9fb81511722867e5 2024-11-19T12:18:40,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/03d4c042a43d4406942ef60517a61e0a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/03d4c042a43d4406942ef60517a61e0a 2024-11-19T12:18:40,700 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2ab31cd85d134acabd70d813f23a3b26 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/2ab31cd85d134acabd70d813f23a3b26 2024-11-19T12:18:40,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/38588620032b4ec1a659cabfdfc91715 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/38588620032b4ec1a659cabfdfc91715 2024-11-19T12:18:40,702 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/868fcdac2855413da833ccadfe873e97 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/868fcdac2855413da833ccadfe873e97 2024-11-19T12:18:40,704 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/55889f6ff5ee4e5db6e19a460b09857e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/55889f6ff5ee4e5db6e19a460b09857e 2024-11-19T12:18:40,705 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8d695fc902b34fc3a7e42f24d57840cf to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/8d695fc902b34fc3a7e42f24d57840cf 2024-11-19T12:18:40,706 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/053636572e3c46ada74856e030a47ddc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/053636572e3c46ada74856e030a47ddc 2024-11-19T12:18:40,707 DEBUG [StoreCloser-TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/387208b07aaa4db89ffb284e31b08a6c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/387208b07aaa4db89ffb284e31b08a6c 2024-11-19T12:18:40,713 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/recovered.edits/387.seqid, newMaxSeqId=387, maxSeqId=4 2024-11-19T12:18:40,714 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02. 
2024-11-19T12:18:40,714 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1635): Region close journal for 9f6f39e9e25e92ed07f6efcc92916a02: 2024-11-19T12:18:40,716 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(170): Closed 9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:40,716 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=9f6f39e9e25e92ed07f6efcc92916a02, regionState=CLOSED 2024-11-19T12:18:40,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-19T12:18:40,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseRegionProcedure 9f6f39e9e25e92ed07f6efcc92916a02, server=af314c41f984,36047,1732018661455 in 2.2090 sec 2024-11-19T12:18:40,720 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=63, resume processing ppid=62 2024-11-19T12:18:40,720 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, ppid=62, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9f6f39e9e25e92ed07f6efcc92916a02, UNASSIGN in 2.2130 sec 2024-11-19T12:18:40,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-19T12:18:40,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.2160 sec 2024-11-19T12:18:40,723 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018720723"}]},"ts":"1732018720723"} 2024-11-19T12:18:40,723 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-19T12:18:40,726 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-19T12:18:40,727 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.2280 sec 2024-11-19T12:18:41,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-19T12:18:42,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-19T12:18:42,605 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-19T12:18:42,606 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-19T12:18:42,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:42,607 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:42,608 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-19T12:18:42,608 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=65, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:42,609 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,611 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/recovered.edits] 2024-11-19T12:18:42,614 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/43a55ad238f1480dba92a9026e692b4f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/43a55ad238f1480dba92a9026e692b4f 2024-11-19T12:18:42,616 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/5fb9915165aa4ec58fa739dff5ff2cb8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/5fb9915165aa4ec58fa739dff5ff2cb8 2024-11-19T12:18:42,617 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/6da812f08a3d4363a3b3bd7419078f61 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/6da812f08a3d4363a3b3bd7419078f61 2024-11-19T12:18:42,618 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/b1db3cfe71b34ffcb894b5e1ca8010f7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/b1db3cfe71b34ffcb894b5e1ca8010f7 2024-11-19T12:18:42,620 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/db90bf1e47b8452d8afc768d01801c1a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/A/db90bf1e47b8452d8afc768d01801c1a 2024-11-19T12:18:42,623 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/73a6448ce01b48aab72b6fbbc2ec192e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/73a6448ce01b48aab72b6fbbc2ec192e 2024-11-19T12:18:42,624 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/8a78d39633854fc79d7ab83a14f7cc07 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/8a78d39633854fc79d7ab83a14f7cc07 2024-11-19T12:18:42,625 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/adeffb56f7f34d38b4499183fb18871d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/adeffb56f7f34d38b4499183fb18871d 2024-11-19T12:18:42,626 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/c6cc3e6086044c9ea0d48d2e556b4ec5 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/c6cc3e6086044c9ea0d48d2e556b4ec5 2024-11-19T12:18:42,628 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/cf58c4683d954fe6ab79605d79a6d191 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/B/cf58c4683d954fe6ab79605d79a6d191 2024-11-19T12:18:42,630 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/216b13e892b747b29fae4985d80ba53a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/216b13e892b747b29fae4985d80ba53a 2024-11-19T12:18:42,631 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/54183ef18a554610be5123ef35d6388c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/54183ef18a554610be5123ef35d6388c 2024-11-19T12:18:42,632 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/a68679d3fc00426ba9b7e1b049b262a0 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/a68679d3fc00426ba9b7e1b049b262a0 2024-11-19T12:18:42,633 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/b63ed5faabee4bb7bf73ab494d95277e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/b63ed5faabee4bb7bf73ab494d95277e 2024-11-19T12:18:42,635 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/dccd61177f674201af58ce7b4556db26 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/C/dccd61177f674201af58ce7b4556db26 2024-11-19T12:18:42,637 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/recovered.edits/387.seqid to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02/recovered.edits/387.seqid 2024-11-19T12:18:42,638 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,638 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-19T12:18:42,639 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-19T12:18:42,640 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-19T12:18:42,644 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411190a228acbded649b9934c7dfcb7f7ccc3_9f6f39e9e25e92ed07f6efcc92916a02 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411190a228acbded649b9934c7dfcb7f7ccc3_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,645 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111929e3708ad60b49f38f46100a8723c355_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111929e3708ad60b49f38f46100a8723c355_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,646 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411192cd2cf776d124b6ea0822bd5f51e08bf_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411192cd2cf776d124b6ea0822bd5f51e08bf_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,647 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193356f5af3fc84368bb7e01e873f26564_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193356f5af3fc84368bb7e01e873f26564_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,648 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119365fc5b0a64f47e6b6e6f5bd35609108_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119365fc5b0a64f47e6b6e6f5bd35609108_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,649 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193a1e6532285f48a0822917e19847e225_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193a1e6532285f48a0822917e19847e225_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,650 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111949824db0593042aa8413e93e8d390dcd_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111949824db0593042aa8413e93e8d390dcd_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,651 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111956c6c7eb00a8441e9513c4d313b0e994_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111956c6c7eb00a8441e9513c4d313b0e994_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,652 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411195eb19c8c7d9940929f514feb949ea238_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411195eb19c8c7d9940929f514feb949ea238_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,654 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196c919998fd5b4bea881df3f7615685d8_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196c919998fd5b4bea881df3f7615685d8_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,655 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196ea4f55377f14d6096598bdaf5cc8cf6_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196ea4f55377f14d6096598bdaf5cc8cf6_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,656 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411197af415778c404cd187b676254c821088_9f6f39e9e25e92ed07f6efcc92916a02 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411197af415778c404cd187b676254c821088_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,657 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119a04c9bd25cb543209449279342d5bf0e_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119a04c9bd25cb543209449279342d5bf0e_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,659 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119a2a1075e484a420f89e7fe8fb88ed489_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119a2a1075e484a420f89e7fe8fb88ed489_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,660 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119be061c0e19894cb8bb878eacc098622d_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119be061c0e19894cb8bb878eacc098622d_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,661 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119c47606c809424a759adaa1cf15dfd3ed_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119c47606c809424a759adaa1cf15dfd3ed_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,663 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d396ddd11685461abec68698722d9e61_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d396ddd11685461abec68698722d9e61_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,664 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119eeb00e3b39ff41c7bedffc7c5ffb4ca5_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119eeb00e3b39ff41c7bedffc7c5ffb4ca5_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,665 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119fb76cf38aa4343839b9e6c09ae659f74_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119fb76cf38aa4343839b9e6c09ae659f74_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,666 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119ff88c177baa24083b9911f997eeaedb4_9f6f39e9e25e92ed07f6efcc92916a02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119ff88c177baa24083b9911f997eeaedb4_9f6f39e9e25e92ed07f6efcc92916a02 2024-11-19T12:18:42,666 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-19T12:18:42,668 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=65, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:42,671 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-19T12:18:42,674 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-19T12:18:42,675 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=65, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:42,675 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
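The archiving above is the file-system half of DeleteTableProcedure pid=65; the lines that follow are the hbase:meta cleanup for the same procedure. For orientation only, here is a minimal client-side sketch of the kind of call that results in such a procedure, assuming a running HBase 2.x cluster and the standard Admin API; this is an illustration, not the actual TestAcidGuarantees test code, and the class name is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative sketch: reads hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        admin.disableTable(table);  // a table must be disabled before it can be deleted
        admin.deleteTable(table);   // master runs DeleteTableProcedure: archive HFiles/MOB files, clean hbase:meta
      }
    }
  }
}

The blocking deleteTable call returns only once the master finishes the procedure, which is what the "Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 65 completed" client line further down reflects.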
2024-11-19T12:18:42,675 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732018722675"}]},"ts":"9223372036854775807"} 2024-11-19T12:18:42,677 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-19T12:18:42,677 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9f6f39e9e25e92ed07f6efcc92916a02, NAME => 'TestAcidGuarantees,,1732018694487.9f6f39e9e25e92ed07f6efcc92916a02.', STARTKEY => '', ENDKEY => ''}] 2024-11-19T12:18:42,678 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-19T12:18:42,678 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732018722678"}]},"ts":"9223372036854775807"} 2024-11-19T12:18:42,679 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-19T12:18:42,681 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=65, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:42,682 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 75 msec 2024-11-19T12:18:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-19T12:18:42,709 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-19T12:18:42,717 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=240 (was 239) Potentially hanging thread: hconnection-0x5525568c-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5525568c-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-633302489_22 at /127.0.0.1:54706 [Waiting for operation #567] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5525568c-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/cluster_516133e5-12a5-9390-7554-f37f555213db/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_222723292_22 at /127.0.0.1:50896 [Waiting for operation #267] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/cluster_516133e5-12a5-9390-7554-f37f555213db/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5525568c-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=456 (was 458), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=546 (was 498) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2687 (was 2028) - AvailableMemoryMB LEAK? - 2024-11-19T12:18:42,725 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=240, OpenFileDescriptor=456, MaxFileDescriptor=1048576, SystemLoadAverage=546, ProcessCount=11, AvailableMemoryMB=2687 2024-11-19T12:18:42,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-19T12:18:42,727 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:18:42,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-19T12:18:42,728 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:18:42,728 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:42,728 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 66 2024-11-19T12:18:42,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-19T12:18:42,729 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:18:42,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742079_1255 (size=960) 2024-11-19T12:18:42,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-19T12:18:43,030 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-19T12:18:43,136 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22 2024-11-19T12:18:43,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742080_1256 (size=53) 2024-11-19T12:18:43,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-19T12:18:43,542 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:18:43,542 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing c974ed334ef63a1e045d42c6ff516b94, disabling compactions & flushes 2024-11-19T12:18:43,542 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:43,542 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:43,542 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. after waiting 0 ms 2024-11-19T12:18:43,542 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:43,542 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
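The CreateTableProcedure above (pid=66) materializes the descriptor logged by the master: families A, B and C with VERSIONS => '1' and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A hedged sketch of how such a table could be declared with the HBase 2.x client API follows; it mirrors the descriptor in the log but is not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // table-level attribute seen in the descriptor above: BASIC in-memory compaction
              .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)  // VERSIONS => '1' in the logged descriptor
                .build());
      }
      admin.createTable(table.build());  // master runs CreateTableProcedure (pid=66 in this log)
    }
  }
}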
2024-11-19T12:18:43,543 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:43,543 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:18:43,544 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732018723543"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732018723543"}]},"ts":"1732018723543"} 2024-11-19T12:18:43,545 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-19T12:18:43,545 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:18:43,545 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018723545"}]},"ts":"1732018723545"} 2024-11-19T12:18:43,546 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-19T12:18:43,549 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c974ed334ef63a1e045d42c6ff516b94, ASSIGN}] 2024-11-19T12:18:43,550 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c974ed334ef63a1e045d42c6ff516b94, ASSIGN 2024-11-19T12:18:43,551 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c974ed334ef63a1e045d42c6ff516b94, ASSIGN; state=OFFLINE, location=af314c41f984,36047,1732018661455; forceNewPlan=false, retain=false 2024-11-19T12:18:43,701 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=c974ed334ef63a1e045d42c6ff516b94, regionState=OPENING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:18:43,702 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; OpenRegionProcedure c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:18:43,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-19T12:18:43,854 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:43,857 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
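The assignment above (TransitRegionStateProcedure pid=67 plus OpenRegionProcedure pid=68) takes the new region from OFFLINE through OPENING; the OPEN update to hbase:meta appears a few lines below. From the client side this is usually observed by waiting on table availability and then reading back the region locations, as in this small sketch; the class name is invented and the code is illustrative only, not part of the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WaitForAssignmentSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin();
         RegionLocator locator = connection.getRegionLocator(table)) {
      // Poll until every region of the table has been assigned and opened.
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);
      }
      // Print where each region landed, mirroring the regionState=OPEN/regionLocation entries in the log.
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        System.out.println(location.getRegion().getEncodedName()
            + " -> " + location.getServerName());
      }
    }
  }
}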
2024-11-19T12:18:43,857 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7285): Opening region: {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:18:43,857 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:43,857 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:18:43,857 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7327): checking encryption for c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:43,858 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7330): checking classloading for c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:43,859 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:43,860 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:18:43,860 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c974ed334ef63a1e045d42c6ff516b94 columnFamilyName A 2024-11-19T12:18:43,860 DEBUG [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:43,860 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] regionserver.HStore(327): Store=c974ed334ef63a1e045d42c6ff516b94/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:18:43,861 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:43,861 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:18:43,862 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c974ed334ef63a1e045d42c6ff516b94 columnFamilyName B 2024-11-19T12:18:43,862 DEBUG [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:43,862 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] regionserver.HStore(327): Store=c974ed334ef63a1e045d42c6ff516b94/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:18:43,862 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:43,863 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:18:43,863 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c974ed334ef63a1e045d42c6ff516b94 columnFamilyName C 2024-11-19T12:18:43,863 DEBUG [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:18:43,863 INFO [StoreOpener-c974ed334ef63a1e045d42c6ff516b94-1 {}] regionserver.HStore(327): Store=c974ed334ef63a1e045d42c6ff516b94/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:18:43,864 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:43,864 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:43,864 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:43,866 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:18:43,867 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1085): writing seq id for c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:43,868 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:18:43,868 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1102): Opened c974ed334ef63a1e045d42c6ff516b94; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66546767, jitterRate=-0.008375898003578186}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:18:43,869 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1001): Region open journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:43,870 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., pid=68, masterSystemTime=1732018723853 2024-11-19T12:18:43,871 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:43,871 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
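With the region open (next sequenceid=2) and all three CompactingMemStore stores online, the test immediately starts driving writes at it; the flush request and the RegionTooBusyException warnings further down are the writes outrunning the small test memstore limit (512 K). A minimal sketch of such a write path using the standard Table API is shown here; the row keys, qualifier and payload size are arbitrary illustrative choices, not the test's actual load generator.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteWorkloadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      byte[] value = new byte[1024];  // 1 KB payload per cell, an arbitrary illustrative size
      for (int i = 0; i < 1000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        // One cell in each of the three families created above.
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("q"), value);
        put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("q"), value);
        put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("q"), value);
        // Transient server-side pushback such as RegionTooBusyException (seen in the
        // server log below) is retried by the client with backoff before it gives up.
        table.put(put);
      }
    }
  }
}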
2024-11-19T12:18:43,871 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=c974ed334ef63a1e045d42c6ff516b94, regionState=OPEN, openSeqNum=2, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:18:43,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-19T12:18:43,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; OpenRegionProcedure c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 in 170 msec 2024-11-19T12:18:43,875 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=66 2024-11-19T12:18:43,875 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=66, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c974ed334ef63a1e045d42c6ff516b94, ASSIGN in 324 msec 2024-11-19T12:18:43,875 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:18:43,875 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018723875"}]},"ts":"1732018723875"} 2024-11-19T12:18:43,876 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-19T12:18:43,878 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:18:43,879 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1510 sec 2024-11-19T12:18:44,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-19T12:18:44,833 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 66 completed 2024-11-19T12:18:44,834 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x320146a2 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@fe4ac0b 2024-11-19T12:18:44,837 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4becc07d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:44,838 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:44,840 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33682, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:44,841 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:18:44,842 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54026, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:18:44,843 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bab3f39 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@718544b3 2024-11-19T12:18:44,846 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cc79dd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:44,847 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07e48016 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c9ffc85 2024-11-19T12:18:44,851 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cd6e3ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:44,852 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b2ae977 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18724143 2024-11-19T12:18:44,854 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13b0002b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:44,855 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0ed37f32 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4215ff2 2024-11-19T12:18:44,857 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1703a605, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:44,858 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12e88ea6 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1cb89dc6 2024-11-19T12:18:44,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@168133da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:44,861 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x25f2abe2 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a724365 2024-11-19T12:18:44,863 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a9f805a, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:44,864 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0598ef39 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d59ed84 2024-11-19T12:18:44,866 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b123525, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:44,867 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x23d0f458 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4187186b 2024-11-19T12:18:44,869 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bd6a663, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:44,870 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x77780196 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@ec51b52 2024-11-19T12:18:44,872 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10be4157, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:44,873 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x069ef766 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@20a7636c 2024-11-19T12:18:44,875 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42712ad6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:18:44,879 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:44,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-19T12:18:44,880 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:44,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-19T12:18:44,881 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=69, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:44,881 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:44,886 DEBUG [hconnection-0x3273cd1e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:44,886 DEBUG [hconnection-0x67767d34-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:44,886 DEBUG [hconnection-0x57bb0718-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:44,887 DEBUG [hconnection-0x459e4330-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:44,887 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33690, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:44,887 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33698, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:44,887 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33700, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:44,888 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33704, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:44,888 DEBUG [hconnection-0x4644ce8d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:44,889 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:44,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:44,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:18:44,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:44,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:44,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:44,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:44,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:44,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:44,908 DEBUG 
[hconnection-0x25633912-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:44,908 DEBUG [hconnection-0x31dec9fe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:44,909 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33712, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:44,909 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33726, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:44,912 DEBUG [hconnection-0x146dc893-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:44,913 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:44,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:44,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018784911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:44,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:44,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018784911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:44,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:44,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018784913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:44,924 DEBUG [hconnection-0x2d4ea307-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:44,925 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33738, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:44,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:44,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018784926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:44,928 DEBUG [hconnection-0xb21be7a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:18:44,929 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33740, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:18:44,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:44,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018784930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:44,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/959f8366183549758d38989e22c4cd03 is 50, key is test_row_0/A:col10/1732018724892/Put/seqid=0 2024-11-19T12:18:44,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742081_1257 (size=12001) 2024-11-19T12:18:44,975 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/959f8366183549758d38989e22c4cd03 2024-11-19T12:18:44,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-19T12:18:45,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018785015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018785015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/eb83648fd2f0482b83b2afac0b3972c2 is 50, key is test_row_0/B:col10/1732018724892/Put/seqid=0 2024-11-19T12:18:45,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018785015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018785028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,032 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:45,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018785031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:45,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:45,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:45,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:45,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:45,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742082_1258 (size=12001) 2024-11-19T12:18:45,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/eb83648fd2f0482b83b2afac0b3972c2 2024-11-19T12:18:45,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/217be640898e48efaf854b16147a7dc3 is 50, key is test_row_0/C:col10/1732018724892/Put/seqid=0 2024-11-19T12:18:45,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742083_1259 (size=12001) 2024-11-19T12:18:45,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-19T12:18:45,186 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:45,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:45,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:45,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,187 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:45,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018785217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018785218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018785221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018785231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018785234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,339 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:45,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:45,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:45,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:45,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-19T12:18:45,492 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:45,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:45,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:45,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:45,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/217be640898e48efaf854b16147a7dc3 2024-11-19T12:18:45,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/959f8366183549758d38989e22c4cd03 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/959f8366183549758d38989e22c4cd03 2024-11-19T12:18:45,503 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/959f8366183549758d38989e22c4cd03, entries=150, sequenceid=13, filesize=11.7 K 2024-11-19T12:18:45,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/eb83648fd2f0482b83b2afac0b3972c2 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/eb83648fd2f0482b83b2afac0b3972c2 2024-11-19T12:18:45,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/eb83648fd2f0482b83b2afac0b3972c2, entries=150, sequenceid=13, filesize=11.7 K 2024-11-19T12:18:45,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/217be640898e48efaf854b16147a7dc3 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/217be640898e48efaf854b16147a7dc3 2024-11-19T12:18:45,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/217be640898e48efaf854b16147a7dc3, entries=150, sequenceid=13, filesize=11.7 K 2024-11-19T12:18:45,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c974ed334ef63a1e045d42c6ff516b94 in 621ms, sequenceid=13, compaction requested=false 2024-11-19T12:18:45,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:45,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:45,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 
2024-11-19T12:18:45,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:45,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:45,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:45,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:45,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:45,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:45,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/8e88df4abb26414ebb7b9f835b6fe607 is 50, key is test_row_0/A:col10/1732018724912/Put/seqid=0 2024-11-19T12:18:45,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742084_1260 (size=12001) 2024-11-19T12:18:45,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018785532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018785532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018785533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018785535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018785536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018785636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018785636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018785636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,645 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:45,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:45,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:45,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,646 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
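The repeated RegionTooBusyException entries report writes being rejected once the region's memstore passes its blocking limit (512.0 K here). A minimal sketch of how that limit is usually derived from configuration follows; the default values shown are assumptions about typical settings, not the deliberately small values this test run appears to use:

```java
// Minimal sketch, assuming the usual relationship between the memstore flush size
// and the block multiplier. The 512.0 K limit in the log would correspond to a much
// smaller flush size chosen by the test, which is not visible in this excerpt.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        // Once a region's memstore grows past flushSize * blockMultiplier, further
        // mutations are rejected with RegionTooBusyException until a flush drains it.
        System.out.println("blocking memstore limit = " + (flushSize * blockMultiplier) + " bytes");
    }
}
```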
2024-11-19T12:18:45,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,798 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:45,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:45,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:45,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018785839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018785840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018785841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:45,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/8e88df4abb26414ebb7b9f835b6fe607 2024-11-19T12:18:45,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/6080bde75fb74d8281eee21d6e14f706 is 50, key is test_row_0/B:col10/1732018724912/Put/seqid=0 2024-11-19T12:18:45,951 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:45,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:45,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:45,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:45,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:45,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:45,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742085_1261 (size=12001) 2024-11-19T12:18:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-19T12:18:46,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:46,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018786039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:46,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:46,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018786044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:46,104 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:46,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:46,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:46,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:46,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:46,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
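The rejected Mutate calls above are retriable, and the stock HBase client already retries them with backoff; a caller that does see the exception surface could back off along these lines. This is a hypothetical sketch: the retry count and sleep are arbitrary, and the table, row, and column names are taken from the log text only for illustration.

```java
// Hypothetical sketch of backing off when a put is rejected because the region's
// memstore is over its blocking limit (the RegionTooBusyException seen in this log).
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Region memstore over its limit; give the flusher time to catch up.
                    if (attempt >= 5) throw e;
                    Thread.sleep(100L * attempt);
                }
            }
        }
    }
}
```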
2024-11-19T12:18:46,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018786142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:46,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018786142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:46,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:46,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018786143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:46,235 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-19T12:18:46,257 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:46,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:46,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:46,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:46,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:46,258 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,331 DEBUG [master/af314c41f984:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region dec5b3d3b76c616b09a7c531fa488ba4 changed from -1.0 to 0.0, refreshing cache 2024-11-19T12:18:46,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/6080bde75fb74d8281eee21d6e14f706 2024-11-19T12:18:46,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/2fff2f7d0fa044d8b789f47203baefef is 50, key is test_row_0/C:col10/1732018724912/Put/seqid=0 2024-11-19T12:18:46,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742086_1262 (size=12001) 2024-11-19T12:18:46,410 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:46,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:46,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:46,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:46,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:46,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,563 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:46,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:46,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:46,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:46,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:46,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018786646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:46,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:46,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018786648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:46,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018786649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:46,715 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:46,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:46,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:46,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:46,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:46,716 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:46,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/2fff2f7d0fa044d8b789f47203baefef 2024-11-19T12:18:46,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/8e88df4abb26414ebb7b9f835b6fe607 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8e88df4abb26414ebb7b9f835b6fe607 2024-11-19T12:18:46,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8e88df4abb26414ebb7b9f835b6fe607, entries=150, sequenceid=38, filesize=11.7 K 2024-11-19T12:18:46,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/6080bde75fb74d8281eee21d6e14f706 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6080bde75fb74d8281eee21d6e14f706 2024-11-19T12:18:46,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6080bde75fb74d8281eee21d6e14f706, entries=150, sequenceid=38, filesize=11.7 K 2024-11-19T12:18:46,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/2fff2f7d0fa044d8b789f47203baefef as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/2fff2f7d0fa044d8b789f47203baefef 2024-11-19T12:18:46,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/2fff2f7d0fa044d8b789f47203baefef, entries=150, sequenceid=38, filesize=11.7 K 2024-11-19T12:18:46,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush 
of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c974ed334ef63a1e045d42c6ff516b94 in 1266ms, sequenceid=38, compaction requested=false 2024-11-19T12:18:46,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:46,868 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:46,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-19T12:18:46,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:46,869 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:18:46,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:46,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:46,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:46,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:46,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:46,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:46,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/c9ff3fcff8434479a820cb74a97a6289 is 50, key is test_row_0/A:col10/1732018725530/Put/seqid=0 2024-11-19T12:18:46,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742087_1263 (size=12001) 2024-11-19T12:18:46,887 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/c9ff3fcff8434479a820cb74a97a6289 2024-11-19T12:18:46,900 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7d6bbece50ca4ccdad3e552481e87906 is 50, key is test_row_0/B:col10/1732018725530/Put/seqid=0 2024-11-19T12:18:46,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742088_1264 (size=12001) 2024-11-19T12:18:46,907 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7d6bbece50ca4ccdad3e552481e87906 2024-11-19T12:18:46,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/5b202ef1c67e485b9b514e46ed91b49c is 50, key is test_row_0/C:col10/1732018725530/Put/seqid=0 2024-11-19T12:18:46,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742089_1265 (size=12001) 2024-11-19T12:18:46,920 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/5b202ef1c67e485b9b514e46ed91b49c 2024-11-19T12:18:46,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/c9ff3fcff8434479a820cb74a97a6289 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/c9ff3fcff8434479a820cb74a97a6289 2024-11-19T12:18:46,932 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/c9ff3fcff8434479a820cb74a97a6289, entries=150, sequenceid=49, filesize=11.7 K 2024-11-19T12:18:46,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7d6bbece50ca4ccdad3e552481e87906 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7d6bbece50ca4ccdad3e552481e87906 2024-11-19T12:18:46,937 INFO 
[RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7d6bbece50ca4ccdad3e552481e87906, entries=150, sequenceid=49, filesize=11.7 K 2024-11-19T12:18:46,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/5b202ef1c67e485b9b514e46ed91b49c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/5b202ef1c67e485b9b514e46ed91b49c 2024-11-19T12:18:46,942 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/5b202ef1c67e485b9b514e46ed91b49c, entries=150, sequenceid=49, filesize=11.7 K 2024-11-19T12:18:46,943 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for c974ed334ef63a1e045d42c6ff516b94 in 74ms, sequenceid=49, compaction requested=true 2024-11-19T12:18:46,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:46,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:46,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-19T12:18:46,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-19T12:18:46,945 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-19T12:18:46,946 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0630 sec 2024-11-19T12:18:46,947 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 2.0670 sec 2024-11-19T12:18:46,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-19T12:18:46,984 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-19T12:18:46,986 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:46,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-19T12:18:46,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-19T12:18:46,987 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:46,989 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:46,989 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:47,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:47,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:18:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-19T12:18:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:47,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/a6eb3ed3acb84470bb3bb0d683123516 is 50, key is test_row_1/A:col10/1732018727054/Put/seqid=0 2024-11-19T12:18:47,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742090_1266 (size=9657) 2024-11-19T12:18:47,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-19T12:18:47,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:47,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018787087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:47,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:47,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018787087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:47,143 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:47,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-19T12:18:47,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:47,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:47,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:47,143 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:47,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018787190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:47,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:47,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018787191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:47,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-19T12:18:47,295 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:47,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-19T12:18:47,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:47,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:47,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:47,296 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:47,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:47,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018787394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:47,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:47,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018787394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:47,448 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:47,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-19T12:18:47,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:47,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:47,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:47,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:47,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/a6eb3ed3acb84470bb3bb0d683123516 2024-11-19T12:18:47,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/41f43fae60c0489db311d80885a3c0fb is 50, key is test_row_1/B:col10/1732018727054/Put/seqid=0 2024-11-19T12:18:47,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742091_1267 (size=9657) 2024-11-19T12:18:47,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/41f43fae60c0489db311d80885a3c0fb 2024-11-19T12:18:47,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/a814c4b09352487c8b0b98e946080102 is 50, key is test_row_1/C:col10/1732018727054/Put/seqid=0 2024-11-19T12:18:47,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742092_1268 (size=9657) 2024-11-19T12:18:47,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-19T12:18:47,601 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:47,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-19T12:18:47,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:47,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:47,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:47,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:47,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018787649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:47,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:47,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018787653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:47,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:47,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018787657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:47,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:47,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018787697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:47,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:47,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018787698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:47,754 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:47,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-19T12:18:47,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:47,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:47,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:47,755 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:47,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/a814c4b09352487c8b0b98e946080102 2024-11-19T12:18:47,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/a6eb3ed3acb84470bb3bb0d683123516 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a6eb3ed3acb84470bb3bb0d683123516 2024-11-19T12:18:47,896 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a6eb3ed3acb84470bb3bb0d683123516, entries=100, sequenceid=60, filesize=9.4 K 2024-11-19T12:18:47,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/41f43fae60c0489db311d80885a3c0fb as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/41f43fae60c0489db311d80885a3c0fb 2024-11-19T12:18:47,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/41f43fae60c0489db311d80885a3c0fb, entries=100, sequenceid=60, 
filesize=9.4 K 2024-11-19T12:18:47,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/a814c4b09352487c8b0b98e946080102 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a814c4b09352487c8b0b98e946080102 2024-11-19T12:18:47,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a814c4b09352487c8b0b98e946080102, entries=100, sequenceid=60, filesize=9.4 K 2024-11-19T12:18:47,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for c974ed334ef63a1e045d42c6ff516b94 in 851ms, sequenceid=60, compaction requested=true 2024-11-19T12:18:47,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:47,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:47,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:47,906 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:47,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:47,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:47,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:47,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:47,906 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:47,907 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:47,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-19T12:18:47,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:47,908 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-19T12:18:47,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:47,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:47,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:47,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:47,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:47,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:47,908 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:47,908 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/A is initiating minor compaction (all files) 2024-11-19T12:18:47,908 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/A in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:47,909 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/959f8366183549758d38989e22c4cd03, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8e88df4abb26414ebb7b9f835b6fe607, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/c9ff3fcff8434479a820cb74a97a6289, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a6eb3ed3acb84470bb3bb0d683123516] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=44.6 K 2024-11-19T12:18:47,909 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:47,909 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 959f8366183549758d38989e22c4cd03, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732018724891 2024-11-19T12:18:47,909 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/B is initiating minor compaction (all files) 2024-11-19T12:18:47,909 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/B in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:47,909 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/eb83648fd2f0482b83b2afac0b3972c2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6080bde75fb74d8281eee21d6e14f706, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7d6bbece50ca4ccdad3e552481e87906, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/41f43fae60c0489db311d80885a3c0fb] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=44.6 K 2024-11-19T12:18:47,910 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e88df4abb26414ebb7b9f835b6fe607, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732018724907 2024-11-19T12:18:47,910 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting eb83648fd2f0482b83b2afac0b3972c2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732018724891 2024-11-19T12:18:47,910 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9ff3fcff8434479a820cb74a97a6289, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732018725529 2024-11-19T12:18:47,911 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 6080bde75fb74d8281eee21d6e14f706, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732018724907 2024-11-19T12:18:47,911 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6eb3ed3acb84470bb3bb0d683123516, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732018727053 2024-11-19T12:18:47,912 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d6bbece50ca4ccdad3e552481e87906, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732018725529 2024-11-19T12:18:47,913 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 41f43fae60c0489db311d80885a3c0fb, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732018727053 2024-11-19T12:18:47,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/729c6e20e2c049f0ad9d05d22f639898 is 50, key is test_row_0/A:col10/1732018727082/Put/seqid=0 2024-11-19T12:18:47,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742093_1269 (size=12001) 2024-11-19T12:18:47,927 INFO 
[RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/729c6e20e2c049f0ad9d05d22f639898 2024-11-19T12:18:47,933 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#A#compaction#223 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:47,935 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/6243faae9f4b4e99bdce8c112d0a2d78 is 50, key is test_row_0/A:col10/1732018725530/Put/seqid=0 2024-11-19T12:18:47,943 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#B#compaction#224 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:47,943 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/d01ce41f908d4e6888725f5e0852e1bb is 50, key is test_row_0/B:col10/1732018725530/Put/seqid=0 2024-11-19T12:18:47,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/b40fba99f9a34d00ac37fc9388f2aecf is 50, key is test_row_0/B:col10/1732018727082/Put/seqid=0 2024-11-19T12:18:47,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742095_1271 (size=12139) 2024-11-19T12:18:47,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742096_1272 (size=12001) 2024-11-19T12:18:47,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742094_1270 (size=12139) 2024-11-19T12:18:47,965 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/b40fba99f9a34d00ac37fc9388f2aecf 2024-11-19T12:18:47,966 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/d01ce41f908d4e6888725f5e0852e1bb as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d01ce41f908d4e6888725f5e0852e1bb 2024-11-19T12:18:47,972 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/B of c974ed334ef63a1e045d42c6ff516b94 into d01ce41f908d4e6888725f5e0852e1bb(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:47,972 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:47,972 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/B, priority=12, startTime=1732018727906; duration=0sec 2024-11-19T12:18:47,972 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:47,972 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:B 2024-11-19T12:18:47,972 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:18:47,974 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:18:47,974 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/C is initiating minor compaction (all files) 2024-11-19T12:18:47,974 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/C in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
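The "Exploring compaction algorithm has selected 4 files of size 45660 ... with 3 in ratio" entries above come from the ExploringCompactionPolicy, which only accepts a candidate set of store files when no single file is disproportionately larger than the rest of the set. The sketch below is a simplified illustration of that ratio test, not the actual HBase implementation; the method name, the 1.2 default ratio, and the example file sizes are assumptions chosen only to roughly match the ~44.6 K selections in this log.

```java
import java.util.List;

// Illustrative sketch only: a simplified version of the "in ratio" check referred to by
// the ExploringCompactionPolicy log lines above. Names (isInRatio, DEFAULT_RATIO) and the
// default ratio value are assumptions for illustration, not HBase source code.
public final class RatioCheckSketch {
    // hbase.hstore.compaction.ratio is assumed here to default to 1.2.
    private static final double DEFAULT_RATIO = 1.2;

    /** A candidate selection is "in ratio" if no single file dwarfs the rest of the set. */
    static boolean isInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            // Each file must be no larger than ratio * (combined size of the other files).
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Four store files of roughly the sizes seen in the selections above compact together.
        System.out.println(isInRatio(List.of(12000L, 12000L, 12000L, 9660L), DEFAULT_RATIO));
    }
}
```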
2024-11-19T12:18:47,974 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/217be640898e48efaf854b16147a7dc3, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/2fff2f7d0fa044d8b789f47203baefef, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/5b202ef1c67e485b9b514e46ed91b49c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a814c4b09352487c8b0b98e946080102] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=44.6 K 2024-11-19T12:18:47,974 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 217be640898e48efaf854b16147a7dc3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732018724891 2024-11-19T12:18:47,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/ae3c4a7cca2e4142b423faa74c8ab7b0 is 50, key is test_row_0/C:col10/1732018727082/Put/seqid=0 2024-11-19T12:18:47,975 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fff2f7d0fa044d8b789f47203baefef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732018724907 2024-11-19T12:18:47,975 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b202ef1c67e485b9b514e46ed91b49c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732018725529 2024-11-19T12:18:47,975 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting a814c4b09352487c8b0b98e946080102, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732018727053 2024-11-19T12:18:47,976 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/6243faae9f4b4e99bdce8c112d0a2d78 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/6243faae9f4b4e99bdce8c112d0a2d78 2024-11-19T12:18:47,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742097_1273 (size=12001) 2024-11-19T12:18:47,982 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/A of c974ed334ef63a1e045d42c6ff516b94 into 6243faae9f4b4e99bdce8c112d0a2d78(size=11.9 K), total size for store is 11.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:47,982 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:47,982 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/A, priority=12, startTime=1732018727906; duration=0sec 2024-11-19T12:18:47,982 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:47,982 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:A 2024-11-19T12:18:47,983 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#C#compaction#227 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:47,984 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/69e71de2a48b4ef1b1787e437207b440 is 50, key is test_row_0/C:col10/1732018725530/Put/seqid=0 2024-11-19T12:18:48,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742098_1274 (size=12139) 2024-11-19T12:18:48,008 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/69e71de2a48b4ef1b1787e437207b440 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/69e71de2a48b4ef1b1787e437207b440 2024-11-19T12:18:48,014 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/C of c974ed334ef63a1e045d42c6ff516b94 into 69e71de2a48b4ef1b1787e437207b440(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
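At this point all three stores (A, B and C) of c974ed334ef63a1e045d42c6ff516b94 have been compacted from four files down to a single ~11.9 K file each. When reproducing this kind of scenario outside the test harness, compaction progress can be watched through the public Admin API; the sketch below is a minimal example under the assumption of a reachable cluster configured via hbase-site.xml on the classpath, with the table name taken from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: poll the public Admin API until no compaction is running on the test table.
// Assumes a reachable cluster configured via hbase-site.xml on the classpath.
public final class WaitForCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.compact(table); // ask for a compaction (asynchronous), much like the flusher-triggered requests in this log
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(100); // crude polling; fine for a sketch
            }
            System.out.println("No compaction running on " + table);
        }
    }
}
```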
2024-11-19T12:18:48,014 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:48,014 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/C, priority=12, startTime=1732018727906; duration=0sec 2024-11-19T12:18:48,014 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:48,014 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:C 2024-11-19T12:18:48,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-19T12:18:48,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:48,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:48,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:48,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018788208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:48,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:48,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018788210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:48,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:48,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018788311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:48,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:48,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018788312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:48,380 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/ae3c4a7cca2e4142b423faa74c8ab7b0 2024-11-19T12:18:48,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/729c6e20e2c049f0ad9d05d22f639898 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/729c6e20e2c049f0ad9d05d22f639898 2024-11-19T12:18:48,389 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/729c6e20e2c049f0ad9d05d22f639898, entries=150, sequenceid=86, filesize=11.7 K 2024-11-19T12:18:48,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/b40fba99f9a34d00ac37fc9388f2aecf as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b40fba99f9a34d00ac37fc9388f2aecf 2024-11-19T12:18:48,393 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b40fba99f9a34d00ac37fc9388f2aecf, entries=150, sequenceid=86, filesize=11.7 K 2024-11-19T12:18:48,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/ae3c4a7cca2e4142b423faa74c8ab7b0 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ae3c4a7cca2e4142b423faa74c8ab7b0 2024-11-19T12:18:48,399 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ae3c4a7cca2e4142b423faa74c8ab7b0, entries=150, sequenceid=86, filesize=11.7 K 2024-11-19T12:18:48,400 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for c974ed334ef63a1e045d42c6ff516b94 in 492ms, sequenceid=86, compaction requested=false 2024-11-19T12:18:48,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:48,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:48,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-19T12:18:48,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-19T12:18:48,404 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-19T12:18:48,404 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4130 sec 2024-11-19T12:18:48,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.4190 sec 2024-11-19T12:18:48,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:48,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-19T12:18:48,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:48,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:48,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:48,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:48,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:48,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:48,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/d69bd38966ec4e879879d25ba92cb7a2 is 50, key is test_row_0/A:col10/1732018728205/Put/seqid=0 2024-11-19T12:18:48,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742099_1275 (size=12001) 2024-11-19T12:18:48,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/d69bd38966ec4e879879d25ba92cb7a2 2024-11-19T12:18:48,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/d2111cdd4c2e47b18923412b70e81cc4 is 50, key is test_row_0/B:col10/1732018728205/Put/seqid=0 2024-11-19T12:18:48,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742100_1276 (size=12001) 2024-11-19T12:18:48,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/d2111cdd4c2e47b18923412b70e81cc4 2024-11-19T12:18:48,553 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/26845c6a9b97444ab150123dd3c84916 is 50, key is test_row_0/C:col10/1732018728205/Put/seqid=0 2024-11-19T12:18:48,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742101_1277 (size=12001) 2024-11-19T12:18:48,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:48,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018788563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:48,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:48,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018788564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:48,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:48,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018788665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:48,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:48,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018788666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:48,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:48,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018788867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:48,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:48,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018788869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:48,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/26845c6a9b97444ab150123dd3c84916 2024-11-19T12:18:48,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/d69bd38966ec4e879879d25ba92cb7a2 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d69bd38966ec4e879879d25ba92cb7a2 2024-11-19T12:18:48,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d69bd38966ec4e879879d25ba92cb7a2, entries=150, sequenceid=101, filesize=11.7 K 2024-11-19T12:18:48,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/d2111cdd4c2e47b18923412b70e81cc4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2111cdd4c2e47b18923412b70e81cc4 2024-11-19T12:18:48,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2111cdd4c2e47b18923412b70e81cc4, entries=150, sequenceid=101, filesize=11.7 K 2024-11-19T12:18:48,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/26845c6a9b97444ab150123dd3c84916 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/26845c6a9b97444ab150123dd3c84916 2024-11-19T12:18:48,976 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/26845c6a9b97444ab150123dd3c84916, entries=150, sequenceid=101, filesize=11.7 K 2024-11-19T12:18:48,977 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c974ed334ef63a1e045d42c6ff516b94 in 460ms, sequenceid=101, compaction requested=true 2024-11-19T12:18:48,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:48,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:48,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:48,977 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:48,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:48,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:48,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:48,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:48,977 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:48,978 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:48,978 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:48,978 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/B is initiating minor compaction (all files) 2024-11-19T12:18:48,978 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/A is initiating minor compaction (all files) 2024-11-19T12:18:48,978 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/B in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:48,978 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/A in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:48,978 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d01ce41f908d4e6888725f5e0852e1bb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b40fba99f9a34d00ac37fc9388f2aecf, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2111cdd4c2e47b18923412b70e81cc4] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=35.3 K 2024-11-19T12:18:48,978 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/6243faae9f4b4e99bdce8c112d0a2d78, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/729c6e20e2c049f0ad9d05d22f639898, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d69bd38966ec4e879879d25ba92cb7a2] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=35.3 K 2024-11-19T12:18:48,979 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d01ce41f908d4e6888725f5e0852e1bb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732018725530 2024-11-19T12:18:48,979 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6243faae9f4b4e99bdce8c112d0a2d78, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732018725530 2024-11-19T12:18:48,979 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b40fba99f9a34d00ac37fc9388f2aecf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732018727079 2024-11-19T12:18:48,979 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 729c6e20e2c049f0ad9d05d22f639898, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732018727079 2024-11-19T12:18:48,979 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d2111cdd4c2e47b18923412b70e81cc4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732018728205 2024-11-19T12:18:48,979 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d69bd38966ec4e879879d25ba92cb7a2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732018728205 
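The throttle.PressureAwareThroughputController entries in this section report each compaction's average throughput against a "total limit" of 50.00 MB/second. That limit is the regionserver-wide compaction throughput bound, which the controller adjusts between a lower and an upper configured value depending on flush pressure; the snippet below simply reads those two settings. The default values shown are assumptions, not taken from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: read the compaction throughput bounds that the pressure-aware controller
// works within. The fallback defaults below (50 MB/s and 100 MB/s) are assumptions.
public final class CompactionThroughputBounds {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long lower = conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        long higher = conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.printf("compaction throughput limit ranges from %.2f to %.2f MB/s%n",
            lower / 1048576.0, higher / 1048576.0);
    }
}
```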
2024-11-19T12:18:49,005 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#B#compaction#231 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:49,006 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/f7e2ed7efdc4447ba6acc37a734fc7d1 is 50, key is test_row_0/B:col10/1732018728205/Put/seqid=0 2024-11-19T12:18:49,007 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#A#compaction#232 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:49,008 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/925f1236bf6047a3afae9867d65f748b is 50, key is test_row_0/A:col10/1732018728205/Put/seqid=0 2024-11-19T12:18:49,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742102_1278 (size=12241) 2024-11-19T12:18:49,024 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/925f1236bf6047a3afae9867d65f748b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/925f1236bf6047a3afae9867d65f748b 2024-11-19T12:18:49,030 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/A of c974ed334ef63a1e045d42c6ff516b94 into 925f1236bf6047a3afae9867d65f748b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
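The recurring RegionTooBusyException WARNs ("Over memstore limit=512.0 K") are thrown by HRegion.checkResources() when a region's memstore grows faster than flushes can drain it and crosses its blocking threshold. That threshold is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512 K figure here presumably reflects the deliberately small settings used by this test rather than production defaults. The sketch below shows the threshold arithmetic and a hand-rolled backoff around a put; the row, family and qualifier are taken from this log, while the retry loop is purely illustrative (a normal client relies on its built-in retries, configured via hbase.client.retries.number and hbase.client.pause, and may surface the failure wrapped in a retries-exhausted exception instead of a bare RegionTooBusyException).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch under assumptions: (1) how the memstore blocking threshold is normally derived
// from configuration, and (2) a naive manual retry when a put is rejected because the
// region is over its memstore limit. The HBase client's own retry layer usually absorbs
// this, so the explicit catch here is only for illustration.
public final class MemstorePressureSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("updates block above roughly " + (flushSize * multiplier) + " bytes per region");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) throw e;    // give up eventually
                    Thread.sleep(100L * attempt); // linear backoff while flushes catch up
                }
            }
        }
    }
}
```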
2024-11-19T12:18:49,030 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:49,030 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/A, priority=13, startTime=1732018728977; duration=0sec 2024-11-19T12:18:49,030 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:49,031 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:A 2024-11-19T12:18:49,031 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:49,032 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:49,032 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/C is initiating minor compaction (all files) 2024-11-19T12:18:49,032 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/C in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:49,032 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/69e71de2a48b4ef1b1787e437207b440, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ae3c4a7cca2e4142b423faa74c8ab7b0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/26845c6a9b97444ab150123dd3c84916] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=35.3 K 2024-11-19T12:18:49,033 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69e71de2a48b4ef1b1787e437207b440, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732018725530 2024-11-19T12:18:49,033 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae3c4a7cca2e4142b423faa74c8ab7b0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732018727079 2024-11-19T12:18:49,034 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26845c6a9b97444ab150123dd3c84916, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732018728205 2024-11-19T12:18:49,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46369 is added to blk_1073742103_1279 (size=12241) 2024-11-19T12:18:49,041 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/f7e2ed7efdc4447ba6acc37a734fc7d1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/f7e2ed7efdc4447ba6acc37a734fc7d1 2024-11-19T12:18:49,044 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#C#compaction#233 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:49,045 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/92383f58e9574d4686a5124a6bd43e87 is 50, key is test_row_0/C:col10/1732018728205/Put/seqid=0 2024-11-19T12:18:49,046 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/B of c974ed334ef63a1e045d42c6ff516b94 into f7e2ed7efdc4447ba6acc37a734fc7d1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:49,046 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:49,046 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/B, priority=13, startTime=1732018728977; duration=0sec 2024-11-19T12:18:49,046 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:49,046 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:B 2024-11-19T12:18:49,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742104_1280 (size=12241) 2024-11-19T12:18:49,073 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/92383f58e9574d4686a5124a6bd43e87 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/92383f58e9574d4686a5124a6bd43e87 2024-11-19T12:18:49,080 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/C of c974ed334ef63a1e045d42c6ff516b94 into 92383f58e9574d4686a5124a6bd43e87(size=12.0 K), total size for store is 12.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:49,080 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:49,080 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/C, priority=13, startTime=1732018728977; duration=0sec 2024-11-19T12:18:49,080 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:49,080 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:C 2024-11-19T12:18:49,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-19T12:18:49,092 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-19T12:18:49,093 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:49,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-19T12:18:49,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-19T12:18:49,095 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:49,096 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:49,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:49,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:49,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-19T12:18:49,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:49,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:49,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:49,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-19T12:18:49,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:49,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:49,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/a9929d8e31a741caa87b923bac8e7596 is 50, key is test_row_0/A:col10/1732018728555/Put/seqid=0 2024-11-19T12:18:49,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:49,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018789180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:49,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:49,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018789182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:49,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742105_1281 (size=12051) 2024-11-19T12:18:49,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-19T12:18:49,247 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:49,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-19T12:18:49,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:49,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:49,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:49,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:49,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:49,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:49,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:49,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018789283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:49,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:49,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018789284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:49,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-19T12:18:49,400 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:49,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-19T12:18:49,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:49,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:49,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:49,400 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
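The pid=74 failures above come from the master's FlushRegionProcedure repeatedly dispatching FlushRegionCallable while the region server reports that region c974ed334ef63a1e045d42c6ff516b94 is "already flushing"; at the same time, client puts are being rejected in HRegion.checkResources with RegionTooBusyException until the in-flight flush drains the memstore. As a rough client-side sketch only, assuming the standard HBase 2.x client API, the two operations involved look roughly like the following; the table, family, qualifier, and row names mirror the test data in the log, while the written value and the catch-block handling are illustrative placeholders, not what AcidGuaranteesTestTool actually does:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

      // Asking the master to flush the table is what creates the
      // FlushTableProcedure / FlushRegionProcedure pair (pid=73/74) seen above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));

      // A concurrent writer hitting the same region; "A"/"col10"/"test_row_0"
      // match the cells in the log, the value is a placeholder.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (RegionTooBusyException e) {
        // Thrown when HRegion.checkResources() rejects the write because the
        // memstore is over its blocking limit. In practice the client's
        // retrying caller backs off and retries (the "tries=6, retries=16"
        // lines below) before anything reaches application code.
        System.err.println("Region busy, backing off: " + e.getMessage());
      }
    } catch (IOException e) {
      throw e;
    }
  }
}
```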
2024-11-19T12:18:49,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:49,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:49,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:49,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018789485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:49,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:49,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018789487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:49,552 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:49,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-19T12:18:49,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:49,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:49,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:49,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
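The repeated "Over memstore limit=512.0 K" rejections are the per-region write block: HRegion.checkResources refuses new mutations once the region's memstore exceeds the flush size multiplied by the block multiplier. A minimal sketch of the two configuration keys involved, with example values rather than the ones this test run necessarily uses (though a 128 K flush size with the default multiplier of 4 would work out to the 512 K cap seen above):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Per-region flush threshold (default 128 MB); tests often shrink it
    // drastically, which is presumably how this run ends up at a 512 K cap.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);

    // Writes block once the memstore reaches flush.size * block.multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes would block at roughly " + blockingLimit + " bytes");
  }
}
```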
2024-11-19T12:18:49,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:49,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:49,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/a9929d8e31a741caa87b923bac8e7596 2024-11-19T12:18:49,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/5c35a58fc5f1487795bc625053d951ae is 50, key is test_row_0/B:col10/1732018728555/Put/seqid=0 2024-11-19T12:18:49,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742106_1282 (size=12051) 2024-11-19T12:18:49,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/5c35a58fc5f1487795bc625053d951ae 2024-11-19T12:18:49,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/9957635465fe459c96591fec0f9232bf is 50, key is test_row_0/C:col10/1732018728555/Put/seqid=0 2024-11-19T12:18:49,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742107_1283 (size=12051) 2024-11-19T12:18:49,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:49,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018789670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:49,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:49,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018789670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:49,672 DEBUG [Thread-1179 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., hostname=af314c41f984,36047,1732018661455, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:18:49,672 DEBUG [Thread-1175 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., hostname=af314c41f984,36047,1732018661455, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:18:49,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:49,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018789677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:49,679 DEBUG [Thread-1177 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., hostname=af314c41f984,36047,1732018661455, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:18:49,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-19T12:18:49,705 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:49,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-19T12:18:49,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 
{event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:49,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:49,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:49,706 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:49,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:49,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:49,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:49,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018789786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:49,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:49,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018789790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:49,858 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:49,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-19T12:18:49,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:49,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:49,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:49,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:49,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:49,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:50,011 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:50,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-19T12:18:50,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:50,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
as already flushing 2024-11-19T12:18:50,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:50,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:50,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:50,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:50,021 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/9957635465fe459c96591fec0f9232bf 2024-11-19T12:18:50,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/a9929d8e31a741caa87b923bac8e7596 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a9929d8e31a741caa87b923bac8e7596 2024-11-19T12:18:50,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a9929d8e31a741caa87b923bac8e7596, entries=150, sequenceid=129, filesize=11.8 K 2024-11-19T12:18:50,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/5c35a58fc5f1487795bc625053d951ae as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/5c35a58fc5f1487795bc625053d951ae 2024-11-19T12:18:50,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/5c35a58fc5f1487795bc625053d951ae, entries=150, sequenceid=129, filesize=11.8 K 2024-11-19T12:18:50,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/9957635465fe459c96591fec0f9232bf as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/9957635465fe459c96591fec0f9232bf 2024-11-19T12:18:50,040 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/9957635465fe459c96591fec0f9232bf, entries=150, sequenceid=129, filesize=11.8 K 2024-11-19T12:18:50,041 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c974ed334ef63a1e045d42c6ff516b94 in 869ms, sequenceid=129, compaction requested=false 2024-11-19T12:18:50,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:50,164 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:50,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-19T12:18:50,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:50,165 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:18:50,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:50,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:50,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:50,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:50,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:50,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:50,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/26252fff77cf4c609e06aeae626d65dc is 50, key is test_row_0/A:col10/1732018729173/Put/seqid=0 2024-11-19T12:18:50,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742108_1284 (size=12151) 2024-11-19T12:18:50,178 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/26252fff77cf4c609e06aeae626d65dc 2024-11-19T12:18:50,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/0e806b8f0e5a4e5e9fed7f31b7b5ae7f is 50, key is test_row_0/B:col10/1732018729173/Put/seqid=0 2024-11-19T12:18:50,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742109_1285 (size=12151) 2024-11-19T12:18:50,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-19T12:18:50,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:50,290 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:50,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:50,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018790324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:50,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:50,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018790326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:50,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:50,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018790427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:50,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:50,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018790429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:50,589 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/0e806b8f0e5a4e5e9fed7f31b7b5ae7f 2024-11-19T12:18:50,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/84b1ee01cd724e3b9877116a6a68cb98 is 50, key is test_row_0/C:col10/1732018729173/Put/seqid=0 2024-11-19T12:18:50,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742110_1286 (size=12151) 2024-11-19T12:18:50,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:50,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018790629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:50,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:50,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018790632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:50,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:50,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018790933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:50,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:50,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018790935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:51,001 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/84b1ee01cd724e3b9877116a6a68cb98 2024-11-19T12:18:51,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/26252fff77cf4c609e06aeae626d65dc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/26252fff77cf4c609e06aeae626d65dc 2024-11-19T12:18:51,010 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/26252fff77cf4c609e06aeae626d65dc, entries=150, sequenceid=140, filesize=11.9 K 2024-11-19T12:18:51,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/0e806b8f0e5a4e5e9fed7f31b7b5ae7f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/0e806b8f0e5a4e5e9fed7f31b7b5ae7f 2024-11-19T12:18:51,014 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/0e806b8f0e5a4e5e9fed7f31b7b5ae7f, entries=150, sequenceid=140, filesize=11.9 K 2024-11-19T12:18:51,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/84b1ee01cd724e3b9877116a6a68cb98 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/84b1ee01cd724e3b9877116a6a68cb98 2024-11-19T12:18:51,018 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/84b1ee01cd724e3b9877116a6a68cb98, entries=150, sequenceid=140, filesize=11.9 K 2024-11-19T12:18:51,019 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c974ed334ef63a1e045d42c6ff516b94 in 854ms, sequenceid=140, compaction requested=true 2024-11-19T12:18:51,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:51,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:51,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-19T12:18:51,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-19T12:18:51,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-19T12:18:51,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9240 sec 2024-11-19T12:18:51,023 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.9290 sec 2024-11-19T12:18:51,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-19T12:18:51,199 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-19T12:18:51,201 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:51,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-19T12:18:51,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-19T12:18:51,202 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:51,203 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:51,203 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:51,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-19T12:18:51,235 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-19T12:18:51,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-19T12:18:51,354 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:51,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-19T12:18:51,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:51,355 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-19T12:18:51,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:51,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:51,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:51,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:51,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:51,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:51,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/a3b30e76f9264a1a98a59419127d4c28 is 50, key is test_row_0/A:col10/1732018730323/Put/seqid=0 2024-11-19T12:18:51,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742111_1287 (size=12151) 2024-11-19T12:18:51,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:51,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:51,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:51,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018791448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:51,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:51,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018791449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:51,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-19T12:18:51,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:51,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018791552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:51,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:51,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018791552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:51,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:51,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018791755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:51,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:51,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018791755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:51,764 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/a3b30e76f9264a1a98a59419127d4c28 2024-11-19T12:18:51,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/9dc76864587b48ff8495ab8acefceb84 is 50, key is test_row_0/B:col10/1732018730323/Put/seqid=0 2024-11-19T12:18:51,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742112_1288 (size=12151) 2024-11-19T12:18:51,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-19T12:18:52,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:52,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018792058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:52,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:52,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018792059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:52,182 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/9dc76864587b48ff8495ab8acefceb84 2024-11-19T12:18:52,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/cbc81d61948a4227a55ae32e77ba8546 is 50, key is test_row_0/C:col10/1732018730323/Put/seqid=0 2024-11-19T12:18:52,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742113_1289 (size=12151) 2024-11-19T12:18:52,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-19T12:18:52,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:52,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018792561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:52,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:52,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018792563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:52,599 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/cbc81d61948a4227a55ae32e77ba8546 2024-11-19T12:18:52,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/a3b30e76f9264a1a98a59419127d4c28 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a3b30e76f9264a1a98a59419127d4c28 2024-11-19T12:18:52,607 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a3b30e76f9264a1a98a59419127d4c28, entries=150, sequenceid=165, filesize=11.9 K 2024-11-19T12:18:52,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/9dc76864587b48ff8495ab8acefceb84 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc76864587b48ff8495ab8acefceb84 2024-11-19T12:18:52,611 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc76864587b48ff8495ab8acefceb84, entries=150, sequenceid=165, filesize=11.9 K 2024-11-19T12:18:52,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/cbc81d61948a4227a55ae32e77ba8546 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/cbc81d61948a4227a55ae32e77ba8546 2024-11-19T12:18:52,615 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/cbc81d61948a4227a55ae32e77ba8546, entries=150, sequenceid=165, filesize=11.9 K 2024-11-19T12:18:52,616 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c974ed334ef63a1e045d42c6ff516b94 in 1261ms, sequenceid=165, compaction requested=true 2024-11-19T12:18:52,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:52,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:52,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-19T12:18:52,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-19T12:18:52,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-19T12:18:52,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4140 sec 2024-11-19T12:18:52,620 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.4180 sec 2024-11-19T12:18:53,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-19T12:18:53,306 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-19T12:18:53,307 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:53,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-19T12:18:53,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-19T12:18:53,309 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:53,310 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:53,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:53,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-19T12:18:53,462 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:53,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-19T12:18:53,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:53,463 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:18:53,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:53,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:53,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:53,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:53,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:53,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:53,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/e83808798e6b460f927c2ff8d559e4b1 is 50, key is test_row_0/A:col10/1732018731448/Put/seqid=0 2024-11-19T12:18:53,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742114_1290 (size=12151) 
2024-11-19T12:18:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:53,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:53,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-19T12:18:53,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:53,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018793614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:53,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:53,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018793614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:53,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:53,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018793679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:53,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:53,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018793680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:53,682 DEBUG [Thread-1175 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., hostname=af314c41f984,36047,1732018661455, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:18:53,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:53,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018793680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:53,682 DEBUG [Thread-1179 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., hostname=af314c41f984,36047,1732018661455, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:18:53,682 DEBUG [Thread-1177 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., hostname=af314c41f984,36047,1732018661455, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at 
org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:18:53,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:53,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018793717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:53,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:53,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018793720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:53,872 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/e83808798e6b460f927c2ff8d559e4b1 2024-11-19T12:18:53,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/fd4442828b67436f91f584de28f76dd1 is 50, key is test_row_0/B:col10/1732018731448/Put/seqid=0 2024-11-19T12:18:53,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742115_1291 (size=12151) 2024-11-19T12:18:53,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-19T12:18:53,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:53,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018793921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:53,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:53,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018793921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:54,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:54,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018794224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:54,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:54,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018794227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:54,284 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/fd4442828b67436f91f584de28f76dd1 2024-11-19T12:18:54,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/137c3d9cfead4546ad30e27fb0551de9 is 50, key is test_row_0/C:col10/1732018731448/Put/seqid=0 2024-11-19T12:18:54,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742116_1292 (size=12151) 2024-11-19T12:18:54,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-19T12:18:54,702 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/137c3d9cfead4546ad30e27fb0551de9 2024-11-19T12:18:54,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/e83808798e6b460f927c2ff8d559e4b1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e83808798e6b460f927c2ff8d559e4b1 2024-11-19T12:18:54,711 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e83808798e6b460f927c2ff8d559e4b1, entries=150, sequenceid=176, filesize=11.9 K 2024-11-19T12:18:54,712 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/fd4442828b67436f91f584de28f76dd1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/fd4442828b67436f91f584de28f76dd1 2024-11-19T12:18:54,715 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/fd4442828b67436f91f584de28f76dd1, entries=150, sequenceid=176, filesize=11.9 K 2024-11-19T12:18:54,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/137c3d9cfead4546ad30e27fb0551de9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/137c3d9cfead4546ad30e27fb0551de9 2024-11-19T12:18:54,720 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/137c3d9cfead4546ad30e27fb0551de9, entries=150, sequenceid=176, filesize=11.9 K 2024-11-19T12:18:54,721 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for c974ed334ef63a1e045d42c6ff516b94 in 1259ms, sequenceid=176, compaction requested=true 2024-11-19T12:18:54,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:54,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:54,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-19T12:18:54,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-19T12:18:54,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-19T12:18:54,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4120 sec 2024-11-19T12:18:54,725 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.4170 sec 2024-11-19T12:18:54,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:54,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-19T12:18:54,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:54,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:54,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:54,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:54,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:54,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:54,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/3cc6c05ec2ec40f995f4e41c39c932d1 is 50, key is test_row_0/A:col10/1732018733613/Put/seqid=0 2024-11-19T12:18:54,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742117_1293 (size=12151) 2024-11-19T12:18:54,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:54,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018794739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:54,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:54,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018794741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:54,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:54,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018794842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:54,847 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:54,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018794845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:55,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:55,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018795046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:55,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:55,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018795049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:55,139 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/3cc6c05ec2ec40f995f4e41c39c932d1 2024-11-19T12:18:55,147 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/04801c5e17c64a38a13008cb06426458 is 50, key is test_row_0/B:col10/1732018733613/Put/seqid=0 2024-11-19T12:18:55,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742118_1294 (size=12151) 2024-11-19T12:18:55,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:55,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018795350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:55,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:55,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018795355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:55,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-19T12:18:55,413 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-19T12:18:55,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:55,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-19T12:18:55,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-19T12:18:55,415 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:55,416 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:55,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:55,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-19T12:18:55,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/04801c5e17c64a38a13008cb06426458 2024-11-19T12:18:55,568 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:55,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 
2024-11-19T12:18:55,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:55,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:55,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:55,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:55,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:55,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:55,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/d5685b91e2824bdd97e08e0583cf139f is 50, key is test_row_0/C:col10/1732018733613/Put/seqid=0 2024-11-19T12:18:55,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742119_1295 (size=12151) 2024-11-19T12:18:55,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-19T12:18:55,721 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:55,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-19T12:18:55,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:55,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:55,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:55,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:55,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:55,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:55,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:55,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018795854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:55,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:55,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018795860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:55,874 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:55,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-19T12:18:55,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:55,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:55,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:55,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:55,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:55,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:55,980 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/d5685b91e2824bdd97e08e0583cf139f 2024-11-19T12:18:55,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/3cc6c05ec2ec40f995f4e41c39c932d1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3cc6c05ec2ec40f995f4e41c39c932d1 2024-11-19T12:18:55,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3cc6c05ec2ec40f995f4e41c39c932d1, entries=150, sequenceid=203, filesize=11.9 K 2024-11-19T12:18:55,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/04801c5e17c64a38a13008cb06426458 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/04801c5e17c64a38a13008cb06426458 2024-11-19T12:18:55,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/04801c5e17c64a38a13008cb06426458, entries=150, sequenceid=203, filesize=11.9 K 2024-11-19T12:18:55,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/d5685b91e2824bdd97e08e0583cf139f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/d5685b91e2824bdd97e08e0583cf139f 2024-11-19T12:18:55,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/d5685b91e2824bdd97e08e0583cf139f, entries=150, sequenceid=203, filesize=11.9 K 2024-11-19T12:18:55,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for c974ed334ef63a1e045d42c6ff516b94 in 1267ms, sequenceid=203, compaction requested=true 2024-11-19T12:18:55,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:55,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
c974ed334ef63a1e045d42c6ff516b94:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:55,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:55,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:55,998 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-19T12:18:55,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:55,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:55,998 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-19T12:18:55,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:55,999 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72896 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-19T12:18:55,999 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72896 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-19T12:18:55,999 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/A is initiating minor compaction (all files) 2024-11-19T12:18:55,999 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/B is initiating minor compaction (all files) 2024-11-19T12:18:55,999 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/A in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:55,999 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/B in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:56,000 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/925f1236bf6047a3afae9867d65f748b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a9929d8e31a741caa87b923bac8e7596, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/26252fff77cf4c609e06aeae626d65dc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a3b30e76f9264a1a98a59419127d4c28, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e83808798e6b460f927c2ff8d559e4b1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3cc6c05ec2ec40f995f4e41c39c932d1] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=71.2 K 2024-11-19T12:18:56,000 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/f7e2ed7efdc4447ba6acc37a734fc7d1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/5c35a58fc5f1487795bc625053d951ae, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/0e806b8f0e5a4e5e9fed7f31b7b5ae7f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc76864587b48ff8495ab8acefceb84, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/fd4442828b67436f91f584de28f76dd1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/04801c5e17c64a38a13008cb06426458] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=71.2 K 2024-11-19T12:18:56,000 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting f7e2ed7efdc4447ba6acc37a734fc7d1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732018728205 2024-11-19T12:18:56,000 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 925f1236bf6047a3afae9867d65f748b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732018728205 2024-11-19T12:18:56,000 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9929d8e31a741caa87b923bac8e7596, keycount=150, bloomtype=ROW, size=11.8 K, 
encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732018728555 2024-11-19T12:18:56,000 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c35a58fc5f1487795bc625053d951ae, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732018728555 2024-11-19T12:18:56,001 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26252fff77cf4c609e06aeae626d65dc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732018729173 2024-11-19T12:18:56,001 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e806b8f0e5a4e5e9fed7f31b7b5ae7f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732018729173 2024-11-19T12:18:56,001 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3b30e76f9264a1a98a59419127d4c28, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1732018730320 2024-11-19T12:18:56,001 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 9dc76864587b48ff8495ab8acefceb84, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1732018730320 2024-11-19T12:18:56,001 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting e83808798e6b460f927c2ff8d559e4b1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732018731438 2024-11-19T12:18:56,002 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting fd4442828b67436f91f584de28f76dd1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732018731438 2024-11-19T12:18:56,002 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3cc6c05ec2ec40f995f4e41c39c932d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732018733613 2024-11-19T12:18:56,002 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 04801c5e17c64a38a13008cb06426458, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732018733613 2024-11-19T12:18:56,014 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#B#compaction#249 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:56,014 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/df3471727c5f425d9546401f69b6340f is 50, key is test_row_0/B:col10/1732018733613/Put/seqid=0 2024-11-19T12:18:56,015 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#A#compaction#250 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:56,015 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/b857e27d3c004abd948af33460b9c6a1 is 50, key is test_row_0/A:col10/1732018733613/Put/seqid=0 2024-11-19T12:18:56,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-19T12:18:56,026 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:56,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-19T12:18:56,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:56,027 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-19T12:18:56,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:56,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:56,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:56,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:56,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:56,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:56,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742120_1296 (size=12595) 2024-11-19T12:18:56,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742121_1297 (size=12595) 2024-11-19T12:18:56,051 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/b857e27d3c004abd948af33460b9c6a1 as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/b857e27d3c004abd948af33460b9c6a1 2024-11-19T12:18:56,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/222f760e4dd645c2aa85f9efa1accf06 is 50, key is test_row_0/A:col10/1732018734738/Put/seqid=0 2024-11-19T12:18:56,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742122_1298 (size=12151) 2024-11-19T12:18:56,057 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/A of c974ed334ef63a1e045d42c6ff516b94 into b857e27d3c004abd948af33460b9c6a1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:56,057 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/222f760e4dd645c2aa85f9efa1accf06 2024-11-19T12:18:56,058 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:56,058 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/A, priority=10, startTime=1732018735997; duration=0sec 2024-11-19T12:18:56,058 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:56,058 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:A 2024-11-19T12:18:56,058 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-19T12:18:56,060 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72896 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-19T12:18:56,060 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/C is initiating minor compaction (all files) 2024-11-19T12:18:56,060 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/C in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:56,060 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/92383f58e9574d4686a5124a6bd43e87, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/9957635465fe459c96591fec0f9232bf, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/84b1ee01cd724e3b9877116a6a68cb98, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/cbc81d61948a4227a55ae32e77ba8546, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/137c3d9cfead4546ad30e27fb0551de9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/d5685b91e2824bdd97e08e0583cf139f] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=71.2 K 2024-11-19T12:18:56,061 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92383f58e9574d4686a5124a6bd43e87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732018728205 2024-11-19T12:18:56,061 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9957635465fe459c96591fec0f9232bf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732018728555 2024-11-19T12:18:56,061 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84b1ee01cd724e3b9877116a6a68cb98, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732018729173 2024-11-19T12:18:56,062 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbc81d61948a4227a55ae32e77ba8546, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1732018730320 2024-11-19T12:18:56,062 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 137c3d9cfead4546ad30e27fb0551de9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732018731438 2024-11-19T12:18:56,062 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5685b91e2824bdd97e08e0583cf139f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732018733613 2024-11-19T12:18:56,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/95c570cbbb7644cc83fb2a545c07ec4a is 50, key is test_row_0/B:col10/1732018734738/Put/seqid=0 2024-11-19T12:18:56,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46369 is added to blk_1073742123_1299 (size=12151) 2024-11-19T12:18:56,069 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/95c570cbbb7644cc83fb2a545c07ec4a 2024-11-19T12:18:56,077 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#C#compaction#253 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:56,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/bb700aff6a21410b848fe81108f9048a is 50, key is test_row_0/C:col10/1732018734738/Put/seqid=0 2024-11-19T12:18:56,078 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/dd0ea65f0ef04b4db4aabff00e73cd3f is 50, key is test_row_0/C:col10/1732018733613/Put/seqid=0 2024-11-19T12:18:56,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742124_1300 (size=12151) 2024-11-19T12:18:56,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742125_1301 (size=12595) 2024-11-19T12:18:56,438 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/df3471727c5f425d9546401f69b6340f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/df3471727c5f425d9546401f69b6340f 2024-11-19T12:18:56,442 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/B of c974ed334ef63a1e045d42c6ff516b94 into df3471727c5f425d9546401f69b6340f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:18:56,442 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:56,442 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/B, priority=10, startTime=1732018735998; duration=0sec 2024-11-19T12:18:56,442 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:56,442 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:B 2024-11-19T12:18:56,481 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/bb700aff6a21410b848fe81108f9048a 2024-11-19T12:18:56,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/222f760e4dd645c2aa85f9efa1accf06 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/222f760e4dd645c2aa85f9efa1accf06 2024-11-19T12:18:56,488 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/dd0ea65f0ef04b4db4aabff00e73cd3f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/dd0ea65f0ef04b4db4aabff00e73cd3f 2024-11-19T12:18:56,489 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/222f760e4dd645c2aa85f9efa1accf06, entries=150, sequenceid=212, filesize=11.9 K 2024-11-19T12:18:56,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/95c570cbbb7644cc83fb2a545c07ec4a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/95c570cbbb7644cc83fb2a545c07ec4a 2024-11-19T12:18:56,493 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/C of c974ed334ef63a1e045d42c6ff516b94 into 
dd0ea65f0ef04b4db4aabff00e73cd3f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:56,493 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:56,493 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/C, priority=10, startTime=1732018735998; duration=0sec 2024-11-19T12:18:56,493 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:56,493 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:C 2024-11-19T12:18:56,495 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/95c570cbbb7644cc83fb2a545c07ec4a, entries=150, sequenceid=212, filesize=11.9 K 2024-11-19T12:18:56,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/bb700aff6a21410b848fe81108f9048a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/bb700aff6a21410b848fe81108f9048a 2024-11-19T12:18:56,500 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/bb700aff6a21410b848fe81108f9048a, entries=150, sequenceid=212, filesize=11.9 K 2024-11-19T12:18:56,500 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for c974ed334ef63a1e045d42c6ff516b94 in 473ms, sequenceid=212, compaction requested=false 2024-11-19T12:18:56,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:56,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:56,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80
2024-11-19T12:18:56,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=80
2024-11-19T12:18:56,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79
2024-11-19T12:18:56,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0850 sec
2024-11-19T12:18:56,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.0890 sec
2024-11-19T12:18:56,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79
2024-11-19T12:18:56,518 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed
2024-11-19T12:18:56,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-19T12:18:56,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees
2024-11-19T12:18:56,521 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-19T12:18:56,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81
2024-11-19T12:18:56,521 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T12:18:56,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T12:18:56,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81
2024-11-19T12:18:56,673 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455
2024-11-19T12:18:56,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82
2024-11-19T12:18:56,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.
2024-11-19T12:18:56,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:56,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:56,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-19T12:18:56,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-19T12:18:56,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-19T12:18:56,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 154 msec 2024-11-19T12:18:56,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 158 msec 2024-11-19T12:18:56,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-19T12:18:56,824 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-19T12:18:56,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:56,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-19T12:18:56,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-19T12:18:56,830 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:56,830 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:56,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:56,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:56,874 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:18:56,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, 
store=A 2024-11-19T12:18:56,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:56,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:56,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:56,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:56,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:56,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/2dc2342dabfa41d0a22ba13f0f044883 is 50, key is test_row_0/A:col10/1732018736870/Put/seqid=0 2024-11-19T12:18:56,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742126_1302 (size=12151) 2024-11-19T12:18:56,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=228 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/2dc2342dabfa41d0a22ba13f0f044883 2024-11-19T12:18:56,902 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7ce0a19b2d6047b29ccc33b6ed561e67 is 50, key is test_row_0/B:col10/1732018736870/Put/seqid=0 2024-11-19T12:18:56,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742127_1303 (size=12151) 2024-11-19T12:18:56,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:56,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018796924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:56,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:56,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018796924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:56,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-19T12:18:56,982 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:56,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-19T12:18:56,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:56,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:56,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:56,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:56,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:56,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:57,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:57,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018797027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:57,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:57,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018797027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:57,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-19T12:18:57,135 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:57,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-19T12:18:57,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:57,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:57,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:57,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:57,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:57,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:57,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:57,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018797229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:57,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:57,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018797230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:57,287 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:57,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-19T12:18:57,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:57,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:57,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:57,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:57,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:57,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:57,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=228 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7ce0a19b2d6047b29ccc33b6ed561e67 2024-11-19T12:18:57,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/b021928e8e5d430eab156a0da5642897 is 50, key is test_row_0/C:col10/1732018736870/Put/seqid=0 2024-11-19T12:18:57,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742128_1304 (size=12151) 2024-11-19T12:18:57,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-19T12:18:57,439 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:57,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-19T12:18:57,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:57,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:57,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:57,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:57,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:57,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:57,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:57,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018797531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:57,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:57,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018797534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:57,592 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:57,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-19T12:18:57,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:57,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:57,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:57,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:18:57,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:57,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:57,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=228 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/b021928e8e5d430eab156a0da5642897 2024-11-19T12:18:57,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/2dc2342dabfa41d0a22ba13f0f044883 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/2dc2342dabfa41d0a22ba13f0f044883 2024-11-19T12:18:57,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/2dc2342dabfa41d0a22ba13f0f044883, entries=150, sequenceid=228, filesize=11.9 K 2024-11-19T12:18:57,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7ce0a19b2d6047b29ccc33b6ed561e67 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7ce0a19b2d6047b29ccc33b6ed561e67 2024-11-19T12:18:57,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7ce0a19b2d6047b29ccc33b6ed561e67, entries=150, sequenceid=228, filesize=11.9 K 2024-11-19T12:18:57,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/b021928e8e5d430eab156a0da5642897 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b021928e8e5d430eab156a0da5642897 2024-11-19T12:18:57,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b021928e8e5d430eab156a0da5642897, entries=150, sequenceid=228, filesize=11.9 K 2024-11-19T12:18:57,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c974ed334ef63a1e045d42c6ff516b94 in 862ms, sequenceid=228, compaction requested=true 2024-11-19T12:18:57,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:57,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
c974ed334ef63a1e045d42c6ff516b94:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:57,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:57,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:57,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:57,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:57,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-19T12:18:57,736 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:57,736 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:57,737 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:57,737 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/A is initiating minor compaction (all files) 2024-11-19T12:18:57,737 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/A in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:18:57,737 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:57,737 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/B is initiating minor compaction (all files) 2024-11-19T12:18:57,737 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/b857e27d3c004abd948af33460b9c6a1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/222f760e4dd645c2aa85f9efa1accf06, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/2dc2342dabfa41d0a22ba13f0f044883] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.0 K 2024-11-19T12:18:57,737 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/B in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:57,737 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/df3471727c5f425d9546401f69b6340f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/95c570cbbb7644cc83fb2a545c07ec4a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7ce0a19b2d6047b29ccc33b6ed561e67] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.0 K 2024-11-19T12:18:57,738 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting b857e27d3c004abd948af33460b9c6a1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732018733613 2024-11-19T12:18:57,738 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting df3471727c5f425d9546401f69b6340f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732018733613 2024-11-19T12:18:57,738 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 95c570cbbb7644cc83fb2a545c07ec4a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732018734733 2024-11-19T12:18:57,739 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 222f760e4dd645c2aa85f9efa1accf06, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732018734733 2024-11-19T12:18:57,739 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 7ce0a19b2d6047b29ccc33b6ed561e67, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=228, earliestPutTs=1732018736870 2024-11-19T12:18:57,739 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2dc2342dabfa41d0a22ba13f0f044883, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=228, earliestPutTs=1732018736870 2024-11-19T12:18:57,744 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:57,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-19T12:18:57,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:57,745 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-19T12:18:57,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:57,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:57,745 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#A#compaction#258 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:57,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:57,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:57,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:57,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:57,746 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/ca291961c6354ff2a1c59a4165840429 is 50, key is test_row_0/A:col10/1732018736870/Put/seqid=0 2024-11-19T12:18:57,747 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#B#compaction#259 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:57,747 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/c0b2b1ad6894416aa28da41086961777 is 50, key is test_row_0/B:col10/1732018736870/Put/seqid=0 2024-11-19T12:18:57,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742129_1305 (size=12697) 2024-11-19T12:18:57,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742130_1306 (size=12697) 2024-11-19T12:18:57,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/d3a0187d599c49efb30e70345ff0bcdc is 50, key is test_row_0/A:col10/1732018736923/Put/seqid=0 2024-11-19T12:18:57,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742131_1307 (size=12151) 2024-11-19T12:18:57,761 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/c0b2b1ad6894416aa28da41086961777 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/c0b2b1ad6894416aa28da41086961777 2024-11-19T12:18:57,766 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): 
Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/B of c974ed334ef63a1e045d42c6ff516b94 into c0b2b1ad6894416aa28da41086961777(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:57,766 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:57,766 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/B, priority=13, startTime=1732018737736; duration=0sec 2024-11-19T12:18:57,766 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:57,766 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:B 2024-11-19T12:18:57,766 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:57,767 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:57,767 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/C is initiating minor compaction (all files) 2024-11-19T12:18:57,767 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/C in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
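The ExploringCompactionPolicy lines above ("selected 3 files of size 36897 starting at candidate #0 ... with 1 in ratio") describe a ratio-based window search over the store's files. As a rough illustration of that idea only — not HBase's actual policy — the sketch below scans contiguous windows of file sizes and keeps a window only if every file is within a ratio of the rest. The class name, the ratio 1.2 and the min/max file counts are assumptions; just the three input sizes (12.3 K + 11.9 K + 11.9 K = 36,897 bytes) are taken from the log.

import java.util.ArrayList;
import java.util.List;

// Simplified, illustrative ratio-based compaction selection (not HBase's ExploringCompactionPolicy).
public class RatioSelectionSketch {

    // Returns the chosen window of contiguous file sizes, or an empty list if none qualifies.
    static List<Long> select(List<Long> fileSizes, double ratio, int minFiles, int maxFiles) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + minFiles - 1;
                 end < fileSizes.size() && end - start + 1 <= maxFiles; end++) {
                List<Long> window = fileSizes.subList(start, end + 1);
                if (!withinRatio(window, ratio)) {
                    continue;
                }
                long total = window.stream().mapToLong(Long::longValue).sum();
                // Prefer windows that compact more files; break ties on smaller total I/O.
                if (window.size() > best.size()
                        || (window.size() == best.size() && total < bestTotal)) {
                    best = new ArrayList<>(window);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    // Every file must be no larger than ratio * (sum of the other files in the window).
    static boolean withinRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Rough analogue of the three store files above: 12,595 + 12,151 + 12,151 = 36,897 bytes.
        List<Long> sizes = List.of(12_595L, 12_151L, 12_151L);
        System.out.println(select(sizes, 1.2, 3, 10)); // selects all three files
    }
}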
2024-11-19T12:18:57,767 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/dd0ea65f0ef04b4db4aabff00e73cd3f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/bb700aff6a21410b848fe81108f9048a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b021928e8e5d430eab156a0da5642897] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.0 K 2024-11-19T12:18:57,767 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting dd0ea65f0ef04b4db4aabff00e73cd3f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732018733613 2024-11-19T12:18:57,768 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting bb700aff6a21410b848fe81108f9048a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732018734733 2024-11-19T12:18:57,768 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b021928e8e5d430eab156a0da5642897, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=228, earliestPutTs=1732018736870 2024-11-19T12:18:57,775 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#C#compaction#261 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:57,776 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/c316f8f3217647ca9aebf1ff0d148c8c is 50, key is test_row_0/C:col10/1732018736870/Put/seqid=0 2024-11-19T12:18:57,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742132_1308 (size=12697) 2024-11-19T12:18:57,783 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/c316f8f3217647ca9aebf1ff0d148c8c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/c316f8f3217647ca9aebf1ff0d148c8c 2024-11-19T12:18:57,787 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/C of c974ed334ef63a1e045d42c6ff516b94 into c316f8f3217647ca9aebf1ff0d148c8c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
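The PressureAwareThroughputController lines above ("average throughput is ... slept 0 time(s) ... total limit is 50.00 MB/second") point at a shared write-rate budget that compactions sleep against. The following single-threaded sketch shows one plausible way such a limiter can work; the class name, the per-operation split of the budget and the numbers in main() are assumptions for demonstration, not HBase internals.

// Illustrative throughput limiter: sleep whenever the caller gets ahead of its share of a shared byte/s budget.
public class ThroughputLimiterSketch {

    private final double totalLimitBytesPerSec;   // shared budget, e.g. 50 MB/s
    private int activeOperations = 1;             // concurrent compactions/flushes sharing it

    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;
    private long totalSleptMs = 0;

    ThroughputLimiterSketch(double totalLimitBytesPerSec) {
        this.totalLimitBytesPerSec = totalLimitBytesPerSec;
    }

    void setActiveOperations(int n) {
        activeOperations = Math.max(1, n);
    }

    // Call after each chunk of work; sleeps if the caller is ahead of its share of the limit.
    void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double perOpLimit = totalLimitBytesPerSec / activeOperations;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double earliestAllowedSec = bytesWritten / perOpLimit;
        long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
            totalSleptMs += sleepMs;
            Thread.sleep(sleepMs);
        }
    }

    // Rough analogue of the "average throughput ... total slept time" summary in the log.
    String summary() {
        double elapsedSec = Math.max((System.nanoTime() - startNanos) / 1e9, 1e-9);
        double mbPerSec = bytesWritten / elapsedSec / (1024 * 1024);
        return String.format("average throughput is %.2f MB/second, total slept time is %d ms",
            mbPerSec, totalSleptMs);
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50 * 1024 * 1024);
        for (int i = 0; i < 8; i++) {
            limiter.control(4L * 1024 * 1024);   // pretend each block write is 4 MB
        }
        System.out.println(limiter.summary());
    }
}

That the controller reports 0 ms slept here is consistent with these compactions moving only ~36 K each, far below a 50 MB/s budget.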
2024-11-19T12:18:57,787 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:57,787 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/C, priority=13, startTime=1732018737736; duration=0sec 2024-11-19T12:18:57,787 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:57,787 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:C 2024-11-19T12:18:57,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-19T12:18:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:58,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:58,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:58,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018798048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:58,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:58,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018798049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:58,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:58,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018798150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:58,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:58,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018798151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:58,156 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/ca291961c6354ff2a1c59a4165840429 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/ca291961c6354ff2a1c59a4165840429 2024-11-19T12:18:58,160 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/A of c974ed334ef63a1e045d42c6ff516b94 into ca291961c6354ff2a1c59a4165840429(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
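Both the compactions and the flush above write their output under the region's .tmp directory and then "commit" it by renaming it into the store directory (the "Committing .../.tmp/A/... as .../A/..." lines). The sketch below shows that same temp-then-rename pattern against the public Hadoop FileSystem API; the paths and payload are made up, and this is a generic illustration rather than the HRegionFileSystem implementation.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Write the full file under a temporary path, then rename it into its final location.
public class TmpCommitSketch {

    static void writeAndCommit(FileSystem fs, Path tmpFile, Path finalFile, byte[] data) throws IOException {
        try (FSDataOutputStream out = fs.create(tmpFile, /* overwrite= */ true)) {
            out.write(data);
        }
        // Readers never observe a half-written store file: it only becomes visible via the rename.
        if (!fs.rename(tmpFile, finalFile)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
        }
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();    // with no cluster config this resolves to the local FS
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/tmp/store/.tmp/A/example-hfile");
        Path dst = new Path("/tmp/store/A/example-hfile");
        fs.mkdirs(tmp.getParent());
        fs.mkdirs(dst.getParent());
        writeAndCommit(fs, tmp, dst, "example contents".getBytes(StandardCharsets.UTF_8));
        System.out.println("committed: " + fs.exists(dst));
    }
}

The rename step is what lets the store swap in a compacted or flushed file as a unit: the old file set stays readable until the fully written replacement appears under the store directory.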
2024-11-19T12:18:58,160 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:58,160 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/A, priority=13, startTime=1732018737736; duration=0sec 2024-11-19T12:18:58,160 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:58,160 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:A 2024-11-19T12:18:58,161 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/d3a0187d599c49efb30e70345ff0bcdc 2024-11-19T12:18:58,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/d2c8cacd0c324d129684b50c21370843 is 50, key is test_row_0/B:col10/1732018736923/Put/seqid=0 2024-11-19T12:18:58,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742133_1309 (size=12151) 2024-11-19T12:18:58,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:58,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018798353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:58,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:58,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018798353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:58,572 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/d2c8cacd0c324d129684b50c21370843 2024-11-19T12:18:58,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/b89d45c741c44177b96a388d3dc98577 is 50, key is test_row_0/C:col10/1732018736923/Put/seqid=0 2024-11-19T12:18:58,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742134_1310 (size=12151) 2024-11-19T12:18:58,586 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/b89d45c741c44177b96a388d3dc98577 2024-11-19T12:18:58,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/d3a0187d599c49efb30e70345ff0bcdc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d3a0187d599c49efb30e70345ff0bcdc 2024-11-19T12:18:58,594 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d3a0187d599c49efb30e70345ff0bcdc, entries=150, sequenceid=252, filesize=11.9 K 2024-11-19T12:18:58,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/d2c8cacd0c324d129684b50c21370843 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2c8cacd0c324d129684b50c21370843 2024-11-19T12:18:58,599 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2c8cacd0c324d129684b50c21370843, entries=150, sequenceid=252, filesize=11.9 K 2024-11-19T12:18:58,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/b89d45c741c44177b96a388d3dc98577 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b89d45c741c44177b96a388d3dc98577 2024-11-19T12:18:58,603 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b89d45c741c44177b96a388d3dc98577, entries=150, sequenceid=252, filesize=11.9 K 2024-11-19T12:18:58,605 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c974ed334ef63a1e045d42c6ff516b94 in 860ms, sequenceid=252, compaction requested=false 2024-11-19T12:18:58,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:58,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
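The bursts of RegionTooBusyException above come from a resource check that runs before each put: while the region's in-memory (memstore) size is over its blocking limit (512.0 K in this test), mutations are rejected until a flush, like the one that just finished, drains it. The standalone sketch below mimics that back-pressure loop; the 512 KB limit is taken from the log, but the class and method names are hypothetical and this is not HRegion.checkResources itself.

import java.util.concurrent.atomic.AtomicLong;

// Minimal back-pressure sketch: reject writes while the in-memory size is over a blocking limit.
public class MemstoreBackpressureSketch {

    static class TooBusyException extends RuntimeException {
        TooBusyException(String msg) { super(msg); }
    }

    private final long blockingLimitBytes;
    private final AtomicLong memstoreSizeBytes = new AtomicLong();

    MemstoreBackpressureSketch(long blockingLimitBytes) {
        this.blockingLimitBytes = blockingLimitBytes;
    }

    // Analogue of checking resources before applying a mutation.
    void put(byte[] value) {
        if (memstoreSizeBytes.get() > blockingLimitBytes) {
            throw new TooBusyException("Over memstore limit=" + blockingLimitBytes + " bytes");
        }
        memstoreSizeBytes.addAndGet(value.length);
    }

    // Analogue of a flush: data goes to disk (elided here) and the in-memory size drops back.
    void flush() {
        memstoreSizeBytes.set(0);
    }

    public static void main(String[] args) {
        MemstoreBackpressureSketch region = new MemstoreBackpressureSketch(512 * 1024);
        byte[] row = new byte[4 * 1024];
        int accepted = 0;
        try {
            while (true) {
                region.put(row);
                accepted++;
            }
        } catch (TooBusyException e) {
            System.out.println("rejected after " + accepted + " puts: " + e.getMessage());
        }
        region.flush();
        region.put(row);                     // accepted again once the flush has run
        System.out.println("writes accepted again after flush");
    }
}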
2024-11-19T12:18:58,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-19T12:18:58,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-19T12:18:58,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-19T12:18:58,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7760 sec 2024-11-19T12:18:58,609 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.7820 sec 2024-11-19T12:18:58,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:58,659 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-19T12:18:58,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:58,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:58,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:58,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:58,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:58,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:58,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/3c2d5b63ef2c4bb281f5ec507407831a is 50, key is test_row_0/A:col10/1732018738658/Put/seqid=0 2024-11-19T12:18:58,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742135_1311 (size=12301) 2024-11-19T12:18:58,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/3c2d5b63ef2c4bb281f5ec507407831a 2024-11-19T12:18:58,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/c4e24bfe69fd4e02889532378bd87c5d is 50, key is test_row_0/B:col10/1732018738658/Put/seqid=0 2024-11-19T12:18:58,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742136_1312 
(size=12301) 2024-11-19T12:18:58,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:58,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018798694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:58,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:58,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018798694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:58,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018798797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:58,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018798797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:58,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-19T12:18:58,934 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-19T12:18:58,935 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:18:58,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-19T12:18:58,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-19T12:18:58,937 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:18:58,937 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:18:58,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:18:59,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:59,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018799000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:59,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:59,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018799001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-19T12:18:59,086 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/c4e24bfe69fd4e02889532378bd87c5d 2024-11-19T12:18:59,090 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:59,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-19T12:18:59,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:59,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:59,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:59,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:59,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:59,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:18:59,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/f3c8c84209a44c06ba2b521044a96274 is 50, key is test_row_0/C:col10/1732018738658/Put/seqid=0 2024-11-19T12:18:59,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742137_1313 (size=12301) 2024-11-19T12:18:59,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/f3c8c84209a44c06ba2b521044a96274 2024-11-19T12:18:59,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/3c2d5b63ef2c4bb281f5ec507407831a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3c2d5b63ef2c4bb281f5ec507407831a 2024-11-19T12:18:59,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3c2d5b63ef2c4bb281f5ec507407831a, entries=150, sequenceid=269, filesize=12.0 K 2024-11-19T12:18:59,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/c4e24bfe69fd4e02889532378bd87c5d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/c4e24bfe69fd4e02889532378bd87c5d 2024-11-19T12:18:59,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/c4e24bfe69fd4e02889532378bd87c5d, entries=150, sequenceid=269, filesize=12.0 K 2024-11-19T12:18:59,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/f3c8c84209a44c06ba2b521044a96274 as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/f3c8c84209a44c06ba2b521044a96274 2024-11-19T12:18:59,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/f3c8c84209a44c06ba2b521044a96274, entries=150, sequenceid=269, filesize=12.0 K 2024-11-19T12:18:59,117 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for c974ed334ef63a1e045d42c6ff516b94 in 458ms, sequenceid=269, compaction requested=true 2024-11-19T12:18:59,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:59,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:18:59,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:59,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:18:59,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:59,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:18:59,118 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:59,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:59,118 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:59,118 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:59,118 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:59,119 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/B is initiating minor compaction (all files) 2024-11-19T12:18:59,119 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/A is initiating minor compaction (all files) 2024-11-19T12:18:59,119 INFO 
[RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/B in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:59,119 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/A in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:59,119 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/c0b2b1ad6894416aa28da41086961777, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2c8cacd0c324d129684b50c21370843, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/c4e24bfe69fd4e02889532378bd87c5d] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.3 K 2024-11-19T12:18:59,119 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/ca291961c6354ff2a1c59a4165840429, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d3a0187d599c49efb30e70345ff0bcdc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3c2d5b63ef2c4bb281f5ec507407831a] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.3 K 2024-11-19T12:18:59,119 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c0b2b1ad6894416aa28da41086961777, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=228, earliestPutTs=1732018736870 2024-11-19T12:18:59,119 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca291961c6354ff2a1c59a4165840429, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=228, earliestPutTs=1732018736870 2024-11-19T12:18:59,119 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d2c8cacd0c324d129684b50c21370843, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732018736920 2024-11-19T12:18:59,120 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3a0187d599c49efb30e70345ff0bcdc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732018736920 2024-11-19T12:18:59,120 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c4e24bfe69fd4e02889532378bd87c5d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732018738048 2024-11-19T12:18:59,120 DEBUG 
[RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c2d5b63ef2c4bb281f5ec507407831a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732018738048 2024-11-19T12:18:59,131 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#B#compaction#267 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:59,131 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/dacbf67dbb464c8894f2d535f40d6a30 is 50, key is test_row_0/B:col10/1732018738658/Put/seqid=0 2024-11-19T12:18:59,134 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#A#compaction#268 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:59,134 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/dbc6e573332f48b4a91d8b463dbfc39f is 50, key is test_row_0/A:col10/1732018738658/Put/seqid=0 2024-11-19T12:18:59,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742138_1314 (size=12949) 2024-11-19T12:18:59,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742139_1315 (size=12949) 2024-11-19T12:18:59,180 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/dbc6e573332f48b4a91d8b463dbfc39f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/dbc6e573332f48b4a91d8b463dbfc39f 2024-11-19T12:18:59,194 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/A of c974ed334ef63a1e045d42c6ff516b94 into dbc6e573332f48b4a91d8b463dbfc39f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
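The "Exploring compaction algorithm has selected 3 files of size 37149 ... with 1 in ratio" entries above come from HBase's ExploringCompactionPolicy deciding that all three eligible store files fit into one minor compaction. As a rough, simplified sketch of the "in ratio" test only (not the actual HBase implementation, which also enforces min/max file counts and size limits; the 1.2 ratio is the usual hbase.hstore.compaction.ratio default and is an assumption here):

    public class CompactionRatioSketch {
      // Each file must be no larger than ratio * (sum of the other files);
      // 1.2 is the usual hbase.hstore.compaction.ratio default (an assumption here).
      static boolean filesInRatio(long[] storeFileSizes, double ratio) {
        long total = 0;
        for (long size : storeFileSizes) {
          total += size;
        }
        for (long size : storeFileSizes) {
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }
    }

For the three store files above (~12.4 K, ~11.9 K and ~12.0 K, 37,149 bytes total), every file is well under 1.2 times the size of the other two combined, so the whole set is in ratio and all three are rewritten into a single file per store.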
2024-11-19T12:18:59,194 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:59,194 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/A, priority=13, startTime=1732018739117; duration=0sec 2024-11-19T12:18:59,195 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:18:59,195 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:A 2024-11-19T12:18:59,195 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:18:59,196 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:18:59,196 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/C is initiating minor compaction (all files) 2024-11-19T12:18:59,196 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/C in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:59,196 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/c316f8f3217647ca9aebf1ff0d148c8c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b89d45c741c44177b96a388d3dc98577, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/f3c8c84209a44c06ba2b521044a96274] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.3 K 2024-11-19T12:18:59,196 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting c316f8f3217647ca9aebf1ff0d148c8c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=228, earliestPutTs=1732018736870 2024-11-19T12:18:59,196 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting b89d45c741c44177b96a388d3dc98577, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732018736920 2024-11-19T12:18:59,197 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3c8c84209a44c06ba2b521044a96274, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732018738048 2024-11-19T12:18:59,203 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#C#compaction#269 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:18:59,203 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/a16090f7bbad4444a4cf7b488ffd7222 is 50, key is test_row_0/C:col10/1732018738658/Put/seqid=0 2024-11-19T12:18:59,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742140_1316 (size=12949) 2024-11-19T12:18:59,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-19T12:18:59,243 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:18:59,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-19T12:18:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:18:59,244 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-19T12:18:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:59,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:59,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:59,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:59,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/9d1f7b29d5d44630a4b1a3be182ef103 is 50, key is test_row_0/A:col10/1732018738689/Put/seqid=0 
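The pid=85/86 activity above (a FlushTableProcedure on the master, fanned out as a FlushRegionProcedure and executed on the region server as a FlushRegionCallable) is driven by an admin-level flush request against TestAcidGuarantees; the log later records the requester as Client=jenkins. A minimal sketch of that client call, assuming a reachable cluster and a default site configuration on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // The master stores a FlushTableProcedure and fans out per-region
          // FlushRegionProcedures, which reach the region server as the
          // FlushRegionCallable executions seen in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }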
2024-11-19T12:18:59,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742141_1317 (size=12301) 2024-11-19T12:18:59,274 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/9d1f7b29d5d44630a4b1a3be182ef103 2024-11-19T12:18:59,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/a789f7a28afd466eae12e284012a89d9 is 50, key is test_row_0/B:col10/1732018738689/Put/seqid=0 2024-11-19T12:18:59,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742142_1318 (size=12301) 2024-11-19T12:18:59,305 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/a789f7a28afd466eae12e284012a89d9 2024-11-19T12:18:59,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:59,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:18:59,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/6e071bc6a2754e88b7d8e015fcaae0d4 is 50, key is test_row_0/C:col10/1732018738689/Put/seqid=0 2024-11-19T12:18:59,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742143_1319 (size=12301) 2024-11-19T12:18:59,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:59,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:59,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018799322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:59,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018799322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:59,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:59,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018799425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:59,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:59,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018799425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:59,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-19T12:18:59,555 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/dacbf67dbb464c8894f2d535f40d6a30 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/dacbf67dbb464c8894f2d535f40d6a30 2024-11-19T12:18:59,560 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/B of c974ed334ef63a1e045d42c6ff516b94 into dacbf67dbb464c8894f2d535f40d6a30(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
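The WARN/DEBUG pairs above show writes being rejected with RegionTooBusyException while the region's memstore is over its blocking limit; the exception is retryable and the HBase client normally retries it internally. A hedged sketch that makes the backoff pattern explicit for a caller issuing raw single puts (retry count and sleep durations are illustrative assumptions):

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BusyRegionRetry {
      // Retry count and sleep durations are illustrative assumptions.
      static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            if (attempt >= 5) {
              throw e;                     // give up after a few attempts
            }
            Thread.sleep(100L * attempt);  // back off while the memstore drains
          }
        }
      }
    }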
2024-11-19T12:18:59,560 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:59,560 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/B, priority=13, startTime=1732018739118; duration=0sec 2024-11-19T12:18:59,560 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:59,560 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:B 2024-11-19T12:18:59,611 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/a16090f7bbad4444a4cf7b488ffd7222 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a16090f7bbad4444a4cf7b488ffd7222 2024-11-19T12:18:59,616 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/C of c974ed334ef63a1e045d42c6ff516b94 into a16090f7bbad4444a4cf7b488ffd7222(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:18:59,616 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:59,616 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/C, priority=13, startTime=1732018739118; duration=0sec 2024-11-19T12:18:59,616 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:18:59,616 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:C 2024-11-19T12:18:59,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:59,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:59,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018799627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:59,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018799627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:59,719 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/6e071bc6a2754e88b7d8e015fcaae0d4 2024-11-19T12:18:59,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/9d1f7b29d5d44630a4b1a3be182ef103 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9d1f7b29d5d44630a4b1a3be182ef103 2024-11-19T12:18:59,730 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9d1f7b29d5d44630a4b1a3be182ef103, entries=150, sequenceid=293, filesize=12.0 K 2024-11-19T12:18:59,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/a789f7a28afd466eae12e284012a89d9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/a789f7a28afd466eae12e284012a89d9 2024-11-19T12:18:59,734 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/a789f7a28afd466eae12e284012a89d9, entries=150, sequenceid=293, filesize=12.0 K 2024-11-19T12:18:59,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/6e071bc6a2754e88b7d8e015fcaae0d4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/6e071bc6a2754e88b7d8e015fcaae0d4 2024-11-19T12:18:59,738 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/6e071bc6a2754e88b7d8e015fcaae0d4, entries=150, sequenceid=293, filesize=12.0 K 2024-11-19T12:18:59,740 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for c974ed334ef63a1e045d42c6ff516b94 in 495ms, sequenceid=293, compaction requested=false 2024-11-19T12:18:59,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:18:59,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
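The "Over memstore limit=512.0 K" figure in the rejections above is the per-region memstore blocking size, which HBase derives as the flush threshold multiplied by the block multiplier. A sketch of a configuration that would yield that limit, assuming 128 K x 4 (the exact values used by this test are inferred from the logged limit, not read from the test source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingConfig {
      static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Blocking size = flush.size * block.multiplier; 128 K * 4 = 512 K,
        // matching the "Over memstore limit=512.0 K" messages in this log.
        // These exact values are assumptions, not read from the test source.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }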
2024-11-19T12:18:59,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-19T12:18:59,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-19T12:18:59,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-19T12:18:59,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 804 msec 2024-11-19T12:18:59,745 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 809 msec 2024-11-19T12:18:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:18:59,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-19T12:18:59,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:18:59,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:59,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:18:59,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:59,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:18:59,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:18:59,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/e03b1a1215f244ea8365d1893754b638 is 50, key is test_row_0/A:col10/1732018739931/Put/seqid=0 2024-11-19T12:18:59,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742144_1320 (size=14741) 2024-11-19T12:18:59,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018799975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:18:59,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:18:59,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018799977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:00,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-19T12:19:00,040 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-19T12:19:00,041 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:00,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-19T12:19:00,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-19T12:19:00,043 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:00,043 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:00,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:00,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:00,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018800079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:00,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:00,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018800080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-19T12:19:00,194 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:00,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-19T12:19:00,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:00,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:00,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:00,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
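The Mutate calls being rejected above, together with the flushed keys recorded earlier (test_row_0/A:col10, .../B:col10, .../C:col10), point to single-row puts across the table's A, B and C families. A hedged reconstruction of such a write, with the payload size assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteOneRow {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          byte[] value = new byte[10];  // payload size is an assumption
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
          // May be rejected with RegionTooBusyException while the region's
          // memstore is over its blocking limit, as the handlers above report.
          table.put(put);
        }
      }
    }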
2024-11-19T12:19:00,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:00,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:00,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018800282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:00,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:00,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018800283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:00,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-19T12:19:00,346 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:00,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-19T12:19:00,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:00,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:00,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:00,347 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:00,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:00,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:00,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/e03b1a1215f244ea8365d1893754b638 2024-11-19T12:19:00,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/629904369a004819b218f4dd16746a30 is 50, key is test_row_0/B:col10/1732018739931/Put/seqid=0 2024-11-19T12:19:00,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742145_1321 (size=12301) 2024-11-19T12:19:00,369 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/629904369a004819b218f4dd16746a30 2024-11-19T12:19:00,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/78df54c65ca14b2088d6b7be6a639c50 is 50, key is test_row_0/C:col10/1732018739931/Put/seqid=0 2024-11-19T12:19:00,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742146_1322 (size=12301) 2024-11-19T12:19:00,499 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:00,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-19T12:19:00,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:00,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:00,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
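
The repeated RegionTooBusyException warnings in these entries come from HRegion.checkResources rejecting writes once the region's memstore grows past its blocking threshold; the "Over memstore limit=512.0 K" figure is specific to this TestAcidGuarantees run, not a stock default. In an ordinary deployment the blocking threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A minimal sketch of how those two settings can be supplied programmatically follows; the configuration keys are real, but the values shown are illustrative defaults, not the overrides used by this test.

    // Illustrative only: real HBase configuration keys, example values.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingConfig {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Size at which a single memstore is flushed (stock default is 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Writes are rejected with RegionTooBusyException once the memstore
            // reaches flush.size * block.multiplier (stock default multiplier is 4).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }
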
2024-11-19T12:19:00,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:00,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:00,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018800584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:00,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018800587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:00,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-19T12:19:00,652 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:00,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-19T12:19:00,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:00,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:00,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:00,653 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
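
Clients observe these rejections as RegionTooBusyException thrown from the Mutate RPC (the callId 220/209/222 DEBUG lines above). The stock HBase client will typically retry such calls on its own, but a caller can also back off explicitly. The loop below is a hypothetical sketch against the standard client API; the table name, row key, and column layout (families A/B/C, qualifier col10) are taken from this log, while the retry count and backoff values are illustrative.

    // Hypothetical client-side retry on RegionTooBusyException.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);  // fails while the memstore is over its blocking limit
                        break;
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) throw e;     // give up after a few tries
                        Thread.sleep(100L * attempt);  // simple linear backoff
                    }
                }
            }
        }
    }
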
2024-11-19T12:19:00,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:00,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:00,779 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/78df54c65ca14b2088d6b7be6a639c50 2024-11-19T12:19:00,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/e03b1a1215f244ea8365d1893754b638 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e03b1a1215f244ea8365d1893754b638 2024-11-19T12:19:00,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e03b1a1215f244ea8365d1893754b638, entries=200, sequenceid=310, filesize=14.4 K 2024-11-19T12:19:00,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/629904369a004819b218f4dd16746a30 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/629904369a004819b218f4dd16746a30 2024-11-19T12:19:00,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/629904369a004819b218f4dd16746a30, entries=150, sequenceid=310, filesize=12.0 K 2024-11-19T12:19:00,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/78df54c65ca14b2088d6b7be6a639c50 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/78df54c65ca14b2088d6b7be6a639c50 2024-11-19T12:19:00,796 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/78df54c65ca14b2088d6b7be6a639c50, entries=150, sequenceid=310, filesize=12.0 K 2024-11-19T12:19:00,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for c974ed334ef63a1e045d42c6ff516b94 in 865ms, sequenceid=310, compaction requested=true 2024-11-19T12:19:00,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:00,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
c974ed334ef63a1e045d42c6ff516b94:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:00,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:00,797 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:00,797 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:00,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:00,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:00,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:00,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:00,798 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:00,798 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/A is initiating minor compaction (all files) 2024-11-19T12:19:00,798 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/A in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:19:00,798 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/dbc6e573332f48b4a91d8b463dbfc39f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9d1f7b29d5d44630a4b1a3be182ef103, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e03b1a1215f244ea8365d1893754b638] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=39.1 K 2024-11-19T12:19:00,798 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:00,798 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/B is initiating minor compaction (all files) 2024-11-19T12:19:00,798 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbc6e573332f48b4a91d8b463dbfc39f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732018738048 2024-11-19T12:19:00,798 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/B in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:19:00,798 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/dacbf67dbb464c8894f2d535f40d6a30, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/a789f7a28afd466eae12e284012a89d9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/629904369a004819b218f4dd16746a30] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.7 K 2024-11-19T12:19:00,799 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d1f7b29d5d44630a4b1a3be182ef103, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732018738689 2024-11-19T12:19:00,799 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting dacbf67dbb464c8894f2d535f40d6a30, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732018738048 2024-11-19T12:19:00,799 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting e03b1a1215f244ea8365d1893754b638, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732018739320 2024-11-19T12:19:00,799 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting a789f7a28afd466eae12e284012a89d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732018738689 2024-11-19T12:19:00,800 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 629904369a004819b218f4dd16746a30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732018739321 2024-11-19T12:19:00,805 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:00,805 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#A#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:00,805 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/f9da1c6f09b44dd6aea4dbdf9309d34e is 50, key is test_row_0/A:col10/1732018739931/Put/seqid=0 2024-11-19T12:19:00,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-19T12:19:00,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:19:00,806 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-19T12:19:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:19:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:19:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:19:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:00,808 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#B#compaction#277 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:00,809 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7a4303dfcf7343a892caf42c5f54cc17 is 50, key is test_row_0/B:col10/1732018739931/Put/seqid=0 2024-11-19T12:19:00,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/3725c9b3ac5a4844a71bea6c24cddd10 is 50, key is test_row_0/A:col10/1732018739971/Put/seqid=0 2024-11-19T12:19:00,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742147_1323 (size=13051) 2024-11-19T12:19:00,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742148_1324 (size=13051) 2024-11-19T12:19:00,826 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7a4303dfcf7343a892caf42c5f54cc17 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7a4303dfcf7343a892caf42c5f54cc17 2024-11-19T12:19:00,831 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/B of c974ed334ef63a1e045d42c6ff516b94 into 7a4303dfcf7343a892caf42c5f54cc17(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
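
The compaction entries above show ExploringCompactionPolicy selecting all three eligible HFiles per store (A, B, C) for a minor compaction once the latest flush adds a third file. The same flush-then-compact sequence can also be requested by hand through the standard Admin API; the sketch below is illustrative, reusing only the table name from this log, and note that compact() merely queues the request rather than waiting for it to finish.

    // Illustrative use of the Admin API to flush and then request a compaction.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompact {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.flush(table);    // persist current memstores to new HFiles
                admin.compact(table);  // ask the region server for a minor compaction
            }
        }
    }
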
2024-11-19T12:19:00,831 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:00,831 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/B, priority=13, startTime=1732018740797; duration=0sec 2024-11-19T12:19:00,831 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:00,831 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:B 2024-11-19T12:19:00,831 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:00,832 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:00,832 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/C is initiating minor compaction (all files) 2024-11-19T12:19:00,832 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/C in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:00,832 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a16090f7bbad4444a4cf7b488ffd7222, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/6e071bc6a2754e88b7d8e015fcaae0d4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/78df54c65ca14b2088d6b7be6a639c50] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.7 K 2024-11-19T12:19:00,832 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting a16090f7bbad4444a4cf7b488ffd7222, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732018738048 2024-11-19T12:19:00,833 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e071bc6a2754e88b7d8e015fcaae0d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732018738689 2024-11-19T12:19:00,833 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 78df54c65ca14b2088d6b7be6a639c50, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732018739321 2024-11-19T12:19:00,841 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
c974ed334ef63a1e045d42c6ff516b94#C#compaction#279 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:00,841 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/224d7594470e468393bb3f5cc069aaf0 is 50, key is test_row_0/C:col10/1732018739931/Put/seqid=0 2024-11-19T12:19:00,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742150_1326 (size=13051) 2024-11-19T12:19:00,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742149_1325 (size=12301) 2024-11-19T12:19:00,869 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/3725c9b3ac5a4844a71bea6c24cddd10 2024-11-19T12:19:00,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/673078dbc32e4b02972fc68ab0dfe2bb is 50, key is test_row_0/B:col10/1732018739971/Put/seqid=0 2024-11-19T12:19:00,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742151_1327 (size=12301) 2024-11-19T12:19:01,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:19:01,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:01,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:01,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018801106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:01,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:01,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018801107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:01,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-19T12:19:01,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:01,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018801210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:01,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:01,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018801210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:01,224 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/f9da1c6f09b44dd6aea4dbdf9309d34e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/f9da1c6f09b44dd6aea4dbdf9309d34e 2024-11-19T12:19:01,232 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/A of c974ed334ef63a1e045d42c6ff516b94 into f9da1c6f09b44dd6aea4dbdf9309d34e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:01,232 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:01,232 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/A, priority=13, startTime=1732018740797; duration=0sec 2024-11-19T12:19:01,232 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:01,232 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:A 2024-11-19T12:19:01,271 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/224d7594470e468393bb3f5cc069aaf0 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/224d7594470e468393bb3f5cc069aaf0 2024-11-19T12:19:01,276 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/C of c974ed334ef63a1e045d42c6ff516b94 into 224d7594470e468393bb3f5cc069aaf0(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:01,276 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:01,276 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/C, priority=13, startTime=1732018740797; duration=0sec 2024-11-19T12:19:01,276 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:01,276 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:C 2024-11-19T12:19:01,282 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/673078dbc32e4b02972fc68ab0dfe2bb 2024-11-19T12:19:01,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/eef711bd3a6a491b94134f65e1e80bbf is 50, key is test_row_0/C:col10/1732018739971/Put/seqid=0 2024-11-19T12:19:01,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742152_1328 (size=12301) 2024-11-19T12:19:01,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:01,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018801412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:01,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:01,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018801412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:01,691 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/eef711bd3a6a491b94134f65e1e80bbf 2024-11-19T12:19:01,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/3725c9b3ac5a4844a71bea6c24cddd10 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3725c9b3ac5a4844a71bea6c24cddd10 2024-11-19T12:19:01,700 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3725c9b3ac5a4844a71bea6c24cddd10, entries=150, sequenceid=331, filesize=12.0 K 2024-11-19T12:19:01,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/673078dbc32e4b02972fc68ab0dfe2bb as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/673078dbc32e4b02972fc68ab0dfe2bb 2024-11-19T12:19:01,704 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/673078dbc32e4b02972fc68ab0dfe2bb, entries=150, sequenceid=331, filesize=12.0 K 2024-11-19T12:19:01,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/eef711bd3a6a491b94134f65e1e80bbf as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/eef711bd3a6a491b94134f65e1e80bbf 2024-11-19T12:19:01,708 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/eef711bd3a6a491b94134f65e1e80bbf, entries=150, sequenceid=331, filesize=12.0 K 2024-11-19T12:19:01,709 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for c974ed334ef63a1e045d42c6ff516b94 in 903ms, sequenceid=331, compaction requested=false 2024-11-19T12:19:01,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:01,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:01,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-19T12:19:01,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-19T12:19:01,714 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-19T12:19:01,714 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6680 sec 2024-11-19T12:19:01,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.6730 sec 2024-11-19T12:19:01,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:19:01,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-19T12:19:01,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:19:01,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:01,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:19:01,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:01,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:19:01,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:01,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/9cdc378475ec46278f7397dd0688fd00 is 50, key is test_row_0/A:col10/1732018741721/Put/seqid=0 2024-11-19T12:19:01,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742153_1329 (size=12301) 2024-11-19T12:19:01,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:01,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018801760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:01,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:01,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018801760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:01,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:01,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018801864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:01,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:01,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018801864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:02,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:02,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018802067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:02,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:02,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018802067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:02,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/9cdc378475ec46278f7397dd0688fd00 2024-11-19T12:19:02,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-19T12:19:02,146 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-19T12:19:02,150 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:02,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-19T12:19:02,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-19T12:19:02,152 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:02,152 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-19T12:19:02,152 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:02,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7e68c05f6f1a4cbebbd3000bbbe7fff5 is 50, key is test_row_0/B:col10/1732018741721/Put/seqid=0 2024-11-19T12:19:02,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742154_1330 (size=12301) 2024-11-19T12:19:02,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-19T12:19:02,307 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:02,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-19T12:19:02,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:02,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:02,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:02,308 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
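
The RegionTooBusyException warnings in the stretch above all originate in HRegion.checkResources() on the RSRpcServices.mutate() path, i.e. ordinary client Puts being pushed back while the region's memstore is over its blocking limit. As a rough illustration only, here is a minimal client-side sketch of the kind of single-row Put those calls correspond to; the class name PutExample and the cell value are made up, and a reachable cluster with hbase-site.xml on the classpath is assumed. Note that the HBase client retries RegionTooBusyException internally, so a caller only sees it once the retry budget is exhausted.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family "A", qualifier "col10" match the keys visible in the flush output above.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
      // RegionTooBusyException (memstore over its blocking limit) is retried by the
      // client internally; it surfaces to the caller only after retries are exhausted.
      table.put(put);
    }
  }
}
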
2024-11-19T12:19:02,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:02,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:02,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:02,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018802370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:02,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:02,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018802370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:02,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-19T12:19:02,460 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:02,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-19T12:19:02,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:02,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:02,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:02,460 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
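
The flush procedures in this section (pid=87 and pid=89 above) are driven from the client through HBaseAdmin, as the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and HBaseAdmin$TableFuture lines show. A minimal sketch of that admin call follows; FlushExample is a made-up class name and the connection setup is assumed rather than taken from the test. The "Unable to complete flush ... as already flushing" errors above are the region server declining the FlushRegionProcedure subprocedure while a memstore flush is already in progress, after which the master simply re-dispatches it, as the repeated pid=90 dispatches show.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master and waits for it to finish;
      // the repeated "Checking to see if procedure is done pid=..." lines above are
      // the client polling for exactly that completion.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
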
2024-11-19T12:19:02,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:02,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:02,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7e68c05f6f1a4cbebbd3000bbbe7fff5 2024-11-19T12:19:02,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/780b7953f1b54c619d437011dfd11e16 is 50, key is test_row_0/C:col10/1732018741721/Put/seqid=0 2024-11-19T12:19:02,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742155_1331 (size=12301) 2024-11-19T12:19:02,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/780b7953f1b54c619d437011dfd11e16 2024-11-19T12:19:02,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/9cdc378475ec46278f7397dd0688fd00 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9cdc378475ec46278f7397dd0688fd00 2024-11-19T12:19:02,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9cdc378475ec46278f7397dd0688fd00, entries=150, sequenceid=351, filesize=12.0 K 2024-11-19T12:19:02,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7e68c05f6f1a4cbebbd3000bbbe7fff5 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7e68c05f6f1a4cbebbd3000bbbe7fff5 2024-11-19T12:19:02,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7e68c05f6f1a4cbebbd3000bbbe7fff5, entries=150, sequenceid=351, filesize=12.0 K 2024-11-19T12:19:02,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/780b7953f1b54c619d437011dfd11e16 as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/780b7953f1b54c619d437011dfd11e16 2024-11-19T12:19:02,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/780b7953f1b54c619d437011dfd11e16, entries=150, sequenceid=351, filesize=12.0 K 2024-11-19T12:19:02,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for c974ed334ef63a1e045d42c6ff516b94 in 876ms, sequenceid=351, compaction requested=true 2024-11-19T12:19:02,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:02,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:02,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:02,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:02,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:02,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:02,600 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:02,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:02,600 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:02,602 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:02,602 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:02,602 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/B is initiating minor compaction (all files) 2024-11-19T12:19:02,602 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/A is initiating minor compaction (all files) 2024-11-19T12:19:02,603 INFO 
[RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/A in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:02,603 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/B in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:02,603 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/f9da1c6f09b44dd6aea4dbdf9309d34e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3725c9b3ac5a4844a71bea6c24cddd10, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9cdc378475ec46278f7397dd0688fd00] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.8 K 2024-11-19T12:19:02,603 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7a4303dfcf7343a892caf42c5f54cc17, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/673078dbc32e4b02972fc68ab0dfe2bb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7e68c05f6f1a4cbebbd3000bbbe7fff5] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.8 K 2024-11-19T12:19:02,603 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9da1c6f09b44dd6aea4dbdf9309d34e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732018739321 2024-11-19T12:19:02,603 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a4303dfcf7343a892caf42c5f54cc17, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732018739321 2024-11-19T12:19:02,604 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 673078dbc32e4b02972fc68ab0dfe2bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732018739971 2024-11-19T12:19:02,604 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3725c9b3ac5a4844a71bea6c24cddd10, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732018739971 2024-11-19T12:19:02,605 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cdc378475ec46278f7397dd0688fd00, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732018741098 2024-11-19T12:19:02,605 DEBUG 
[RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e68c05f6f1a4cbebbd3000bbbe7fff5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732018741098 2024-11-19T12:19:02,612 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#B#compaction#285 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:02,612 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/b372f608163a42f7baf85e3d2d024c3f is 50, key is test_row_0/B:col10/1732018741721/Put/seqid=0 2024-11-19T12:19:02,612 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:02,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-19T12:19:02,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:02,613 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-19T12:19:02,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:19:02,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:02,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:19:02,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:02,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:19:02,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:02,614 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#A#compaction#286 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:02,614 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/727c7679914e4cc8998f3e372bf6cfe2 is 50, key is test_row_0/A:col10/1732018741721/Put/seqid=0 2024-11-19T12:19:02,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/1862b28e6757465c91231875d26c28ed is 50, key is test_row_0/A:col10/1732018741752/Put/seqid=0 2024-11-19T12:19:02,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742157_1333 (size=13153) 2024-11-19T12:19:02,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742156_1332 (size=13153) 2024-11-19T12:19:02,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742158_1334 (size=12301) 2024-11-19T12:19:02,623 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/1862b28e6757465c91231875d26c28ed 2024-11-19T12:19:02,625 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/b372f608163a42f7baf85e3d2d024c3f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b372f608163a42f7baf85e3d2d024c3f 2024-11-19T12:19:02,629 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/B of c974ed334ef63a1e045d42c6ff516b94 into b372f608163a42f7baf85e3d2d024c3f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
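
Both compaction selections above picked the same input shape: three store files totalling 37653 bytes (the two fresh 12301-byte flush outputs plus one older file of roughly 12.7 K, since 37653 - 2 x 12301 = 13051 bytes), accepted by the ExploringCompactionPolicy after a single permutation and rewritten into one file of about 12.8 K. These compactions are queued automatically by MemStoreFlusher.0; purely as an illustrative sketch, the same machinery can also be driven and observed through the Admin API. CompactionExample and the polling loop below are assumptions, not code from the test.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionExample {
  public static void main(String[] args) throws IOException, InterruptedException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.majorCompact(table);            // queues a compaction request for each region
      CompactionState state;
      do {                                  // poll until the CompactSplit queues drain
        Thread.sleep(1000L);
        state = admin.getCompactionState(table);
      } while (state != CompactionState.NONE);
    }
  }
}
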
2024-11-19T12:19:02,630 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:02,630 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/B, priority=13, startTime=1732018742600; duration=0sec 2024-11-19T12:19:02,630 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:02,630 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:B 2024-11-19T12:19:02,630 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:02,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/b64f94f7990a4490a40ee2c55f8bf8d7 is 50, key is test_row_0/B:col10/1732018741752/Put/seqid=0 2024-11-19T12:19:02,631 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:02,631 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/C is initiating minor compaction (all files) 2024-11-19T12:19:02,631 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/C in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:19:02,631 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/224d7594470e468393bb3f5cc069aaf0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/eef711bd3a6a491b94134f65e1e80bbf, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/780b7953f1b54c619d437011dfd11e16] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.8 K 2024-11-19T12:19:02,632 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 224d7594470e468393bb3f5cc069aaf0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732018739321 2024-11-19T12:19:02,632 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting eef711bd3a6a491b94134f65e1e80bbf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732018739971 2024-11-19T12:19:02,633 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 780b7953f1b54c619d437011dfd11e16, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732018741098 2024-11-19T12:19:02,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742159_1335 (size=12301) 2024-11-19T12:19:02,635 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/b64f94f7990a4490a40ee2c55f8bf8d7 2024-11-19T12:19:02,641 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#C#compaction#289 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:02,642 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/5bedab36ca754f32b577f25c5f96f2d5 is 50, key is test_row_0/C:col10/1732018741721/Put/seqid=0 2024-11-19T12:19:02,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/a3b4457d5c0a421780885a43babee89d is 50, key is test_row_0/C:col10/1732018741752/Put/seqid=0 2024-11-19T12:19:02,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742161_1337 (size=12301) 2024-11-19T12:19:02,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742160_1336 (size=13153) 2024-11-19T12:19:02,649 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/a3b4457d5c0a421780885a43babee89d 2024-11-19T12:19:02,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/1862b28e6757465c91231875d26c28ed as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/1862b28e6757465c91231875d26c28ed 2024-11-19T12:19:02,659 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/1862b28e6757465c91231875d26c28ed, entries=150, sequenceid=371, filesize=12.0 K 2024-11-19T12:19:02,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/b64f94f7990a4490a40ee2c55f8bf8d7 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b64f94f7990a4490a40ee2c55f8bf8d7 2024-11-19T12:19:02,663 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b64f94f7990a4490a40ee2c55f8bf8d7, entries=150, sequenceid=371, filesize=12.0 K 2024-11-19T12:19:02,664 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/a3b4457d5c0a421780885a43babee89d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a3b4457d5c0a421780885a43babee89d 2024-11-19T12:19:02,668 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a3b4457d5c0a421780885a43babee89d, entries=150, sequenceid=371, filesize=12.0 K 2024-11-19T12:19:02,668 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=0 B/0 for c974ed334ef63a1e045d42c6ff516b94 in 55ms, sequenceid=371, compaction requested=false 2024-11-19T12:19:02,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:02,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:02,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-19T12:19:02,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-19T12:19:02,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-19T12:19:02,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 517 msec 2024-11-19T12:19:02,672 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 521 msec 2024-11-19T12:19:02,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-19T12:19:02,754 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-19T12:19:02,755 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:02,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-11-19T12:19:02,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 
2024-11-19T12:19:02,757 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:02,757 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:02,757 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:02,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-19T12:19:02,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:19:02,883 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:19:02,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:19:02,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:02,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:19:02,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:02,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:19:02,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:02,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/518fdfe39e314cff8dd6f5eff3f24111 is 50, key is test_row_0/A:col10/1732018742881/Put/seqid=0 2024-11-19T12:19:02,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742162_1338 (size=17181) 2024-11-19T12:19:02,909 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:02,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
as already flushing 2024-11-19T12:19:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:02,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:02,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:02,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:02,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:02,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018802920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:02,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:02,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018802921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,023 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/727c7679914e4cc8998f3e372bf6cfe2 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/727c7679914e4cc8998f3e372bf6cfe2 2024-11-19T12:19:03,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018803023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018803024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,027 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/A of c974ed334ef63a1e045d42c6ff516b94 into 727c7679914e4cc8998f3e372bf6cfe2(size=12.8 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:03,027 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:03,027 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/A, priority=13, startTime=1732018742600; duration=0sec 2024-11-19T12:19:03,027 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:03,027 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:A 2024-11-19T12:19:03,051 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/5bedab36ca754f32b577f25c5f96f2d5 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/5bedab36ca754f32b577f25c5f96f2d5 2024-11-19T12:19:03,055 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/C of c974ed334ef63a1e045d42c6ff516b94 into 5bedab36ca754f32b577f25c5f96f2d5(size=12.8 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:19:03,055 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:03,055 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/C, priority=13, startTime=1732018742600; duration=0sec 2024-11-19T12:19:03,055 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:03,055 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:C 2024-11-19T12:19:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-19T12:19:03,061 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:03,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,062 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,214 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:03,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:03,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:03,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018803227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018803228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/518fdfe39e314cff8dd6f5eff3f24111 2024-11-19T12:19:03,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/ffd7cc1ab52644ee9c7a144e9753cd8b is 50, key is test_row_0/B:col10/1732018742881/Put/seqid=0 2024-11-19T12:19:03,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742163_1339 (size=12301) 2024-11-19T12:19:03,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-19T12:19:03,367 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:03,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:03,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:03,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:19:03,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,519 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:03,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:03,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:03,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018803530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018803532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,672 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:03,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:03,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:19:03,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:03,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:03,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:03,704 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/ffd7cc1ab52644ee9c7a144e9753cd8b 2024-11-19T12:19:03,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/472a78b3041245c99a12897e7047623a is 50, key is test_row_0/C:col10/1732018742881/Put/seqid=0 2024-11-19T12:19:03,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742164_1340 (size=12301) 2024-11-19T12:19:03,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/472a78b3041245c99a12897e7047623a 2024-11-19T12:19:03,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/518fdfe39e314cff8dd6f5eff3f24111 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/518fdfe39e314cff8dd6f5eff3f24111 2024-11-19T12:19:03,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/518fdfe39e314cff8dd6f5eff3f24111, entries=250, sequenceid=383, filesize=16.8 K 2024-11-19T12:19:03,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/ffd7cc1ab52644ee9c7a144e9753cd8b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/ffd7cc1ab52644ee9c7a144e9753cd8b 2024-11-19T12:19:03,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/ffd7cc1ab52644ee9c7a144e9753cd8b, entries=150, sequenceid=383, filesize=12.0 K 2024-11-19T12:19:03,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/472a78b3041245c99a12897e7047623a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/472a78b3041245c99a12897e7047623a 2024-11-19T12:19:03,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/472a78b3041245c99a12897e7047623a, entries=150, sequenceid=383, filesize=12.0 K 2024-11-19T12:19:03,733 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c974ed334ef63a1e045d42c6ff516b94 in 850ms, sequenceid=383, compaction requested=true 2024-11-19T12:19:03,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:03,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:03,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:03,734 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:03,734 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:03,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:03,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:03,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:03,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:03,735 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42635 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:03,735 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/A is initiating minor compaction (all files) 2024-11-19T12:19:03,735 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/A in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:19:03,735 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/727c7679914e4cc8998f3e372bf6cfe2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/1862b28e6757465c91231875d26c28ed, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/518fdfe39e314cff8dd6f5eff3f24111] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=41.6 K 2024-11-19T12:19:03,735 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:03,735 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 727c7679914e4cc8998f3e372bf6cfe2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732018741098 2024-11-19T12:19:03,735 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/B is initiating minor compaction (all files) 2024-11-19T12:19:03,735 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/B in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:19:03,736 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b372f608163a42f7baf85e3d2d024c3f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b64f94f7990a4490a40ee2c55f8bf8d7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/ffd7cc1ab52644ee9c7a144e9753cd8b] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.9 K 2024-11-19T12:19:03,736 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1862b28e6757465c91231875d26c28ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732018741752 2024-11-19T12:19:03,736 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b372f608163a42f7baf85e3d2d024c3f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732018741098 2024-11-19T12:19:03,736 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 518fdfe39e314cff8dd6f5eff3f24111, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732018742874 2024-11-19T12:19:03,736 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b64f94f7990a4490a40ee2c55f8bf8d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732018741752 2024-11-19T12:19:03,737 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting ffd7cc1ab52644ee9c7a144e9753cd8b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732018742874 2024-11-19T12:19:03,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-19T12:19:03,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:19:03,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:03,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:19:03,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:03,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:19:03,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:03,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:19:03,746 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
c974ed334ef63a1e045d42c6ff516b94#B#compaction#295 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:03,746 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7176dafa84624e5da20640105ae65009 is 50, key is test_row_0/B:col10/1732018742881/Put/seqid=0 2024-11-19T12:19:03,747 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#A#compaction#294 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:03,748 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/7f9c3c5b3b7f4c369684a3cbd14c9774 is 50, key is test_row_0/A:col10/1732018742881/Put/seqid=0 2024-11-19T12:19:03,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018803751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018803752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018803754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/08074043c0704986868d8d6ea7b76992 is 50, key is test_row_0/A:col10/1732018742919/Put/seqid=0 2024-11-19T12:19:03,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742165_1341 (size=13255) 2024-11-19T12:19:03,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742167_1343 (size=14741) 2024-11-19T12:19:03,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742166_1342 (size=13255) 2024-11-19T12:19:03,825 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:03,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:03,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:03,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018803855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018803856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-19T12:19:03,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:03,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018803858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:03,977 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:03,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:03,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:03,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:03,978 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:03,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:04,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018804035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:04,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:04,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33740 deadline: 1732018804035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:04,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:04,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018804059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:04,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:04,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018804060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:04,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:04,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018804062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:04,130 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:04,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:04,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:04,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:04,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/08074043c0704986868d8d6ea7b76992 2024-11-19T12:19:04,176 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/7176dafa84624e5da20640105ae65009 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7176dafa84624e5da20640105ae65009 2024-11-19T12:19:04,183 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/B of c974ed334ef63a1e045d42c6ff516b94 into 7176dafa84624e5da20640105ae65009(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:04,183 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:04,183 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/B, priority=13, startTime=1732018743734; duration=0sec 2024-11-19T12:19:04,183 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:04,183 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:B 2024-11-19T12:19:04,184 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:04,184 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/7f9c3c5b3b7f4c369684a3cbd14c9774 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/7f9c3c5b3b7f4c369684a3cbd14c9774 2024-11-19T12:19:04,185 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:04,185 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/C is initiating minor compaction (all files) 2024-11-19T12:19:04,185 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/C in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,185 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/5bedab36ca754f32b577f25c5f96f2d5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a3b4457d5c0a421780885a43babee89d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/472a78b3041245c99a12897e7047623a] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=36.9 K 2024-11-19T12:19:04,186 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bedab36ca754f32b577f25c5f96f2d5, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732018741098 2024-11-19T12:19:04,186 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting a3b4457d5c0a421780885a43babee89d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732018741752 2024-11-19T12:19:04,187 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 472a78b3041245c99a12897e7047623a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732018742874 2024-11-19T12:19:04,189 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/A of c974ed334ef63a1e045d42c6ff516b94 into 7f9c3c5b3b7f4c369684a3cbd14c9774(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:19:04,189 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:04,189 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/A, priority=13, startTime=1732018743734; duration=0sec 2024-11-19T12:19:04,189 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:04,189 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:A 2024-11-19T12:19:04,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/6237fa9ecb8a43a5a7a9ddcbe0a2008c is 50, key is test_row_0/B:col10/1732018742919/Put/seqid=0 2024-11-19T12:19:04,197 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#C#compaction#298 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:04,197 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/2d83a086c8c04dbe951449458dfea1f8 is 50, key is test_row_0/C:col10/1732018742881/Put/seqid=0 2024-11-19T12:19:04,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742168_1344 (size=12301) 2024-11-19T12:19:04,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742169_1345 (size=13255) 2024-11-19T12:19:04,283 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:04,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:04,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:04,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:19:04,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:04,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018804363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:04,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:04,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018804363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:04,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:04,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018804365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:04,435 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:04,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:04,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:04,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,588 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:04,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:04,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:04,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,588 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:04,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/6237fa9ecb8a43a5a7a9ddcbe0a2008c 2024-11-19T12:19:04,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/647eeaad32954743832e5f96d166dd51 is 50, key is test_row_0/C:col10/1732018742919/Put/seqid=0 2024-11-19T12:19:04,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742170_1346 (size=12301) 2024-11-19T12:19:04,636 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/2d83a086c8c04dbe951449458dfea1f8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/2d83a086c8c04dbe951449458dfea1f8 2024-11-19T12:19:04,640 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/C of c974ed334ef63a1e045d42c6ff516b94 into 2d83a086c8c04dbe951449458dfea1f8(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:04,640 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:04,640 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/C, priority=13, startTime=1732018743734; duration=0sec 2024-11-19T12:19:04,641 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:04,641 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:C 2024-11-19T12:19:04,741 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:04,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:04,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
as already flushing 2024-11-19T12:19:04,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-19T12:19:04,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:04,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33704 deadline: 1732018804869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:04,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:04,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33698 deadline: 1732018804869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:04,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:04,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33710 deadline: 1732018804870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:04,879 DEBUG [Thread-1186 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0598ef39 to 127.0.0.1:64186 2024-11-19T12:19:04,879 DEBUG [Thread-1186 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:04,879 DEBUG [Thread-1184 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x25f2abe2 to 127.0.0.1:64186 2024-11-19T12:19:04,879 DEBUG [Thread-1192 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x069ef766 to 127.0.0.1:64186 2024-11-19T12:19:04,879 DEBUG [Thread-1184 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:04,879 DEBUG [Thread-1192 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:04,879 DEBUG [Thread-1188 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x23d0f458 to 127.0.0.1:64186 2024-11-19T12:19:04,879 DEBUG [Thread-1188 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:04,880 DEBUG [Thread-1190 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77780196 to 127.0.0.1:64186 2024-11-19T12:19:04,880 DEBUG [Thread-1190 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:04,893 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:04,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:04,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
as already flushing 2024-11-19T12:19:04,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:04,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:04,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,026 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/647eeaad32954743832e5f96d166dd51 2024-11-19T12:19:05,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/08074043c0704986868d8d6ea7b76992 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/08074043c0704986868d8d6ea7b76992 2024-11-19T12:19:05,033 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/08074043c0704986868d8d6ea7b76992, entries=200, sequenceid=411, filesize=14.4 K 2024-11-19T12:19:05,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/6237fa9ecb8a43a5a7a9ddcbe0a2008c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6237fa9ecb8a43a5a7a9ddcbe0a2008c 2024-11-19T12:19:05,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6237fa9ecb8a43a5a7a9ddcbe0a2008c, entries=150, sequenceid=411, filesize=12.0 K 2024-11-19T12:19:05,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/647eeaad32954743832e5f96d166dd51 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/647eeaad32954743832e5f96d166dd51 2024-11-19T12:19:05,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:05,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33738 deadline: 1732018805037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:05,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/647eeaad32954743832e5f96d166dd51, entries=150, sequenceid=411, filesize=12.0 K 2024-11-19T12:19:05,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for c974ed334ef63a1e045d42c6ff516b94 in 1300ms, sequenceid=411, compaction requested=false 2024-11-19T12:19:05,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:05,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:19:05,042 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:19:05,042 DEBUG [Thread-1181 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12e88ea6 to 127.0.0.1:64186 2024-11-19T12:19:05,042 DEBUG [Thread-1181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:05,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:19:05,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:05,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:19:05,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:05,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:19:05,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:05,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/8d8346d6baf3409f9374942edc682c08 is 50, key is test_row_0/A:col10/1732018743747/Put/seqid=0 2024-11-19T12:19:05,045 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:05,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:05,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:05,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742171_1347 (size=12301) 2024-11-19T12:19:05,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:05,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:05,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:05,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,350 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:05,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:05,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:05,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,449 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/8d8346d6baf3409f9374942edc682c08 2024-11-19T12:19:05,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/9dc1c5570f74404abb12b6ab035735d4 is 50, key is test_row_0/B:col10/1732018743747/Put/seqid=0 2024-11-19T12:19:05,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742172_1348 (size=12301) 2024-11-19T12:19:05,502 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:05,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:05,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
as already flushing 2024-11-19T12:19:05,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,654 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:05,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:05,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:05,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,807 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:05,807 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:05,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:05,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/9dc1c5570f74404abb12b6ab035735d4 2024-11-19T12:19:05,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/76b74a814b0c444286eda99ff18dc4eb is 50, key is test_row_0/C:col10/1732018743747/Put/seqid=0 2024-11-19T12:19:05,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742173_1349 (size=12301) 2024-11-19T12:19:05,871 DEBUG [Thread-1179 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0ed37f32 to 127.0.0.1:64186 2024-11-19T12:19:05,872 DEBUG [Thread-1179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:05,876 DEBUG [Thread-1177 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b2ae977 to 127.0.0.1:64186 2024-11-19T12:19:05,876 DEBUG [Thread-1177 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:05,882 DEBUG [Thread-1175 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07e48016 to 127.0.0.1:64186 2024-11-19T12:19:05,882 DEBUG [Thread-1175 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:05,959 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:05,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:05,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:05,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:05,960 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:05,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
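The entries above and below repeat one pattern: the master re-dispatches the flush procedure (pid=92) to af314c41f984,36047, the region server answers "NOT flushing ... as already flushing", FlushRegionCallable turns that into an IOException ("Unable to complete flush"), and HMaster logs the remote failure and schedules another attempt roughly every 150 ms until the in-progress MemStoreFlusher run completes. The earlier RegionTooBusyException ("Over memstore limit=512.0 K") is the write-side symptom of the same backlog. The following is a minimal client-side sketch, not taken from the test itself: the cluster connection, the cell value, and the class name are assumptions, while the table, row, family and qualifier are the ones that appear in the log.

// Minimal client-side sketch (assumption: a reachable HBase cluster whose
// connection settings come from hbase-site.xml on the classpath). It mirrors
// the two behaviours visible in this log: requesting a flush of the
// TestAcidGuarantees table, and backing off when a write is rejected with
// RegionTooBusyException because the region is over its memstore limit.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndBackoffSketch {          // class name is illustrative only
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(tn)) {

      // Ask for a flush of the table's memstores. If a flush is already
      // running on the region (as in the log), the server-side procedure is
      // simply retried until the running flush finishes.
      admin.flush(tn);

      // Write one cell using the row/family/qualifier seen in the log
      // ("test_row_0", family A, column col10); the value is made up.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      // Back off explicitly if the region reports it is over its memstore
      // limit. Note that the HBase client already retries this exception
      // internally; the loop only makes the backoff idea visible.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;      // give up after a few attempts
          Thread.sleep(200L * attempt);   // simple linear backoff
        }
      }
    }
  }
}

In practice the client-side retry budget is bounded by hbase.client.retries.number, and the server-side flush procedure retries on its own, which is exactly what the repeated pid=92 dispatch/failure entries in this log show.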
2024-11-19T12:19:05,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:06,112 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:06,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:06,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:06,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
as already flushing 2024-11-19T12:19:06,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:06,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:06,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:06,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:06,264 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:06,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:06,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:06,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. as already flushing 2024-11-19T12:19:06,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:06,265 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:06,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:06,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/76b74a814b0c444286eda99ff18dc4eb 2024-11-19T12:19:06,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/8d8346d6baf3409f9374942edc682c08 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8d8346d6baf3409f9374942edc682c08 2024-11-19T12:19:06,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8d8346d6baf3409f9374942edc682c08, entries=150, sequenceid=425, filesize=12.0 K 2024-11-19T12:19:06,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/9dc1c5570f74404abb12b6ab035735d4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc1c5570f74404abb12b6ab035735d4 2024-11-19T12:19:06,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc1c5570f74404abb12b6ab035735d4, entries=150, 
sequenceid=425, filesize=12.0 K 2024-11-19T12:19:06,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/76b74a814b0c444286eda99ff18dc4eb as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/76b74a814b0c444286eda99ff18dc4eb 2024-11-19T12:19:06,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/76b74a814b0c444286eda99ff18dc4eb, entries=150, sequenceid=425, filesize=12.0 K 2024-11-19T12:19:06,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=20.13 KB/20610 for c974ed334ef63a1e045d42c6ff516b94 in 1244ms, sequenceid=425, compaction requested=true 2024-11-19T12:19:06,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:06,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:06,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:06,285 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:06,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:06,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:06,285 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:06,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c974ed334ef63a1e045d42c6ff516b94:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:06,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:06,286 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40297 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:06,286 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:06,286 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 
c974ed334ef63a1e045d42c6ff516b94/A is initiating minor compaction (all files) 2024-11-19T12:19:06,286 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/A in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:06,286 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/B is initiating minor compaction (all files) 2024-11-19T12:19:06,286 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/7f9c3c5b3b7f4c369684a3cbd14c9774, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/08074043c0704986868d8d6ea7b76992, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8d8346d6baf3409f9374942edc682c08] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=39.4 K 2024-11-19T12:19:06,286 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/B in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:06,286 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7176dafa84624e5da20640105ae65009, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6237fa9ecb8a43a5a7a9ddcbe0a2008c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc1c5570f74404abb12b6ab035735d4] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=37.0 K 2024-11-19T12:19:06,286 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f9c3c5b3b7f4c369684a3cbd14c9774, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732018742874 2024-11-19T12:19:06,286 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 7176dafa84624e5da20640105ae65009, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732018742874 2024-11-19T12:19:06,286 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 6237fa9ecb8a43a5a7a9ddcbe0a2008c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732018742913 2024-11-19T12:19:06,286 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08074043c0704986868d8d6ea7b76992, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732018742913 2024-11-19T12:19:06,287 DEBUG 
[RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 9dc1c5570f74404abb12b6ab035735d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732018743747 2024-11-19T12:19:06,287 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d8346d6baf3409f9374942edc682c08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732018743747 2024-11-19T12:19:06,294 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#B#compaction#303 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:06,295 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/daf8e40f4820433ca18b2a0fb08fae4a is 50, key is test_row_0/B:col10/1732018743747/Put/seqid=0 2024-11-19T12:19:06,295 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#A#compaction#304 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:06,295 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/d3fa901618c84de380ed4b1f6401d713 is 50, key is test_row_0/A:col10/1732018743747/Put/seqid=0 2024-11-19T12:19:06,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742174_1350 (size=13357) 2024-11-19T12:19:06,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742175_1351 (size=13357) 2024-11-19T12:19:06,308 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/d3fa901618c84de380ed4b1f6401d713 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d3fa901618c84de380ed4b1f6401d713 2024-11-19T12:19:06,311 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/A of c974ed334ef63a1e045d42c6ff516b94 into d3fa901618c84de380ed4b1f6401d713(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:19:06,311 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:06,311 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/A, priority=13, startTime=1732018746285; duration=0sec 2024-11-19T12:19:06,311 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:06,311 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:A 2024-11-19T12:19:06,311 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:06,312 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:06,312 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c974ed334ef63a1e045d42c6ff516b94/C is initiating minor compaction (all files) 2024-11-19T12:19:06,312 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c974ed334ef63a1e045d42c6ff516b94/C in TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:06,312 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/2d83a086c8c04dbe951449458dfea1f8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/647eeaad32954743832e5f96d166dd51, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/76b74a814b0c444286eda99ff18dc4eb] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp, totalSize=37.0 K 2024-11-19T12:19:06,313 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d83a086c8c04dbe951449458dfea1f8, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732018742874 2024-11-19T12:19:06,313 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 647eeaad32954743832e5f96d166dd51, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732018742913 2024-11-19T12:19:06,313 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76b74a814b0c444286eda99ff18dc4eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732018743747 2024-11-19T12:19:06,318 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): c974ed334ef63a1e045d42c6ff516b94#C#compaction#305 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:06,319 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/ddf8de5dc2154252bc8a643f6c2dbd61 is 50, key is test_row_0/C:col10/1732018743747/Put/seqid=0 2024-11-19T12:19:06,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742176_1352 (size=13357) 2024-11-19T12:19:06,417 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:06,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-19T12:19:06,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:06,418 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-19T12:19:06,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:19:06,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:06,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:19:06,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:06,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:19:06,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:06,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/0353df5a5d1d4d779eb7c70c03a9927f is 50, key is test_row_0/A:col10/1732018745870/Put/seqid=0 2024-11-19T12:19:06,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742177_1353 
(size=12301) 2024-11-19T12:19:06,707 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/daf8e40f4820433ca18b2a0fb08fae4a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/daf8e40f4820433ca18b2a0fb08fae4a 2024-11-19T12:19:06,711 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/B of c974ed334ef63a1e045d42c6ff516b94 into daf8e40f4820433ca18b2a0fb08fae4a(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:06,711 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:06,711 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/B, priority=13, startTime=1732018746285; duration=0sec 2024-11-19T12:19:06,711 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:06,711 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:B 2024-11-19T12:19:06,725 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/ddf8de5dc2154252bc8a643f6c2dbd61 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ddf8de5dc2154252bc8a643f6c2dbd61 2024-11-19T12:19:06,728 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c974ed334ef63a1e045d42c6ff516b94/C of c974ed334ef63a1e045d42c6ff516b94 into ddf8de5dc2154252bc8a643f6c2dbd61(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:19:06,728 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:06,728 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94., storeName=c974ed334ef63a1e045d42c6ff516b94/C, priority=13, startTime=1732018746285; duration=0sec 2024-11-19T12:19:06,728 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:06,728 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c974ed334ef63a1e045d42c6ff516b94:C 2024-11-19T12:19:06,825 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/0353df5a5d1d4d779eb7c70c03a9927f 2024-11-19T12:19:06,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/0598f5ee59304501aae69f6800618a62 is 50, key is test_row_0/B:col10/1732018745870/Put/seqid=0 2024-11-19T12:19:06,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742178_1354 (size=12301) 2024-11-19T12:19:06,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-19T12:19:07,047 DEBUG [Thread-1173 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bab3f39 to 127.0.0.1:64186 2024-11-19T12:19:07,047 DEBUG [Thread-1173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:07,234 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/0598f5ee59304501aae69f6800618a62 2024-11-19T12:19:07,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/ac1641a9307f4505ae43929732b1103b is 50, key is test_row_0/C:col10/1732018745870/Put/seqid=0 2024-11-19T12:19:07,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742179_1355 (size=12301) 2024-11-19T12:19:07,643 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=432 
(bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/ac1641a9307f4505ae43929732b1103b 2024-11-19T12:19:07,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/0353df5a5d1d4d779eb7c70c03a9927f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/0353df5a5d1d4d779eb7c70c03a9927f 2024-11-19T12:19:07,650 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/0353df5a5d1d4d779eb7c70c03a9927f, entries=150, sequenceid=432, filesize=12.0 K 2024-11-19T12:19:07,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/0598f5ee59304501aae69f6800618a62 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/0598f5ee59304501aae69f6800618a62 2024-11-19T12:19:07,653 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/0598f5ee59304501aae69f6800618a62, entries=150, sequenceid=432, filesize=12.0 K 2024-11-19T12:19:07,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/ac1641a9307f4505ae43929732b1103b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ac1641a9307f4505ae43929732b1103b 2024-11-19T12:19:07,657 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ac1641a9307f4505ae43929732b1103b, entries=150, sequenceid=432, filesize=12.0 K 2024-11-19T12:19:07,657 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=6.71 KB/6870 for c974ed334ef63a1e045d42c6ff516b94 in 1240ms, sequenceid=432, compaction requested=false 2024-11-19T12:19:07,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 
c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:07,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:07,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-11-19T12:19:07,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-11-19T12:19:07,660 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-19T12:19:07,660 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.9010 sec 2024-11-19T12:19:07,660 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 4.9050 sec 2024-11-19T12:19:09,927 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T12:19:10,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-19T12:19:10,862 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 150 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 18 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 14 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 18 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 134 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7383 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6931 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6876 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7407 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6959 2024-11-19T12:19:10,862 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-19T12:19:10,862 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-19T12:19:10,862 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x320146a2 to 127.0.0.1:64186 2024-11-19T12:19:10,862 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:10,863 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-19T12:19:10,863 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-19T12:19:10,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:10,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-19T12:19:10,865 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018750865"}]},"ts":"1732018750865"} 2024-11-19T12:19:10,866 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-19T12:19:10,868 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-19T12:19:10,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-19T12:19:10,870 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c974ed334ef63a1e045d42c6ff516b94, UNASSIGN}] 2024-11-19T12:19:10,870 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c974ed334ef63a1e045d42c6ff516b94, UNASSIGN 2024-11-19T12:19:10,870 INFO [PEWorker-2 {}] 
assignment.RegionStateStore(202): pid=95 updating hbase:meta row=c974ed334ef63a1e045d42c6ff516b94, regionState=CLOSING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:19:10,871 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-19T12:19:10,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:19:10,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-19T12:19:11,022 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:11,023 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:19:11,023 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-19T12:19:11,023 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing c974ed334ef63a1e045d42c6ff516b94, disabling compactions & flushes 2024-11-19T12:19:11,023 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:11,023 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:11,023 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. after waiting 0 ms 2024-11-19T12:19:11,023 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 
2024-11-19T12:19:11,023 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(2837): Flushing c974ed334ef63a1e045d42c6ff516b94 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-11-19T12:19:11,023 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=A 2024-11-19T12:19:11,023 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:11,023 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=B 2024-11-19T12:19:11,023 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:11,023 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c974ed334ef63a1e045d42c6ff516b94, store=C 2024-11-19T12:19:11,023 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:11,026 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/5b69ec49a369420e90f1e280607aae1c is 50, key is test_row_2/A:col10/1732018747046/Put/seqid=0 2024-11-19T12:19:11,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742180_1356 (size=7415) 2024-11-19T12:19:11,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-19T12:19:11,431 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/5b69ec49a369420e90f1e280607aae1c 2024-11-19T12:19:11,437 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/1bb5b307da3d4ae485b2f2df7309bc91 is 50, key is test_row_2/B:col10/1732018747046/Put/seqid=0 2024-11-19T12:19:11,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742181_1357 (size=7415) 2024-11-19T12:19:11,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-19T12:19:11,841 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 
{event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/1bb5b307da3d4ae485b2f2df7309bc91 2024-11-19T12:19:11,847 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/ea8a590881d94b79bc4355efa00a01fa is 50, key is test_row_2/C:col10/1732018747046/Put/seqid=0 2024-11-19T12:19:11,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742182_1358 (size=7415) 2024-11-19T12:19:11,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-19T12:19:12,251 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/ea8a590881d94b79bc4355efa00a01fa 2024-11-19T12:19:12,255 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/A/5b69ec49a369420e90f1e280607aae1c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/5b69ec49a369420e90f1e280607aae1c 2024-11-19T12:19:12,257 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/5b69ec49a369420e90f1e280607aae1c, entries=50, sequenceid=438, filesize=7.2 K 2024-11-19T12:19:12,258 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/B/1bb5b307da3d4ae485b2f2df7309bc91 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/1bb5b307da3d4ae485b2f2df7309bc91 2024-11-19T12:19:12,261 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/1bb5b307da3d4ae485b2f2df7309bc91, entries=50, sequenceid=438, filesize=7.2 K 2024-11-19T12:19:12,261 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/.tmp/C/ea8a590881d94b79bc4355efa00a01fa as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ea8a590881d94b79bc4355efa00a01fa 2024-11-19T12:19:12,264 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ea8a590881d94b79bc4355efa00a01fa, entries=50, sequenceid=438, filesize=7.2 K 2024-11-19T12:19:12,265 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=0 B/0 for c974ed334ef63a1e045d42c6ff516b94 in 1242ms, sequenceid=438, compaction requested=true 2024-11-19T12:19:12,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/959f8366183549758d38989e22c4cd03, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8e88df4abb26414ebb7b9f835b6fe607, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/c9ff3fcff8434479a820cb74a97a6289, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/6243faae9f4b4e99bdce8c112d0a2d78, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a6eb3ed3acb84470bb3bb0d683123516, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/729c6e20e2c049f0ad9d05d22f639898, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/925f1236bf6047a3afae9867d65f748b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d69bd38966ec4e879879d25ba92cb7a2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a9929d8e31a741caa87b923bac8e7596, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/26252fff77cf4c609e06aeae626d65dc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a3b30e76f9264a1a98a59419127d4c28, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e83808798e6b460f927c2ff8d559e4b1, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/b857e27d3c004abd948af33460b9c6a1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3cc6c05ec2ec40f995f4e41c39c932d1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/222f760e4dd645c2aa85f9efa1accf06, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/ca291961c6354ff2a1c59a4165840429, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/2dc2342dabfa41d0a22ba13f0f044883, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d3a0187d599c49efb30e70345ff0bcdc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/dbc6e573332f48b4a91d8b463dbfc39f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3c2d5b63ef2c4bb281f5ec507407831a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9d1f7b29d5d44630a4b1a3be182ef103, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e03b1a1215f244ea8365d1893754b638, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/f9da1c6f09b44dd6aea4dbdf9309d34e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3725c9b3ac5a4844a71bea6c24cddd10, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/727c7679914e4cc8998f3e372bf6cfe2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9cdc378475ec46278f7397dd0688fd00, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/1862b28e6757465c91231875d26c28ed, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/518fdfe39e314cff8dd6f5eff3f24111, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/7f9c3c5b3b7f4c369684a3cbd14c9774, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/08074043c0704986868d8d6ea7b76992, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8d8346d6baf3409f9374942edc682c08] to archive 2024-11-19T12:19:12,267 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T12:19:12,268 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/959f8366183549758d38989e22c4cd03 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/959f8366183549758d38989e22c4cd03 2024-11-19T12:19:12,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8e88df4abb26414ebb7b9f835b6fe607 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8e88df4abb26414ebb7b9f835b6fe607 2024-11-19T12:19:12,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/c9ff3fcff8434479a820cb74a97a6289 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/c9ff3fcff8434479a820cb74a97a6289 2024-11-19T12:19:12,271 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/6243faae9f4b4e99bdce8c112d0a2d78 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/6243faae9f4b4e99bdce8c112d0a2d78 2024-11-19T12:19:12,271 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a6eb3ed3acb84470bb3bb0d683123516 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a6eb3ed3acb84470bb3bb0d683123516 2024-11-19T12:19:12,272 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/729c6e20e2c049f0ad9d05d22f639898 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/729c6e20e2c049f0ad9d05d22f639898 2024-11-19T12:19:12,273 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/925f1236bf6047a3afae9867d65f748b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/925f1236bf6047a3afae9867d65f748b 2024-11-19T12:19:12,274 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d69bd38966ec4e879879d25ba92cb7a2 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d69bd38966ec4e879879d25ba92cb7a2 2024-11-19T12:19:12,275 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a9929d8e31a741caa87b923bac8e7596 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a9929d8e31a741caa87b923bac8e7596 2024-11-19T12:19:12,277 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/26252fff77cf4c609e06aeae626d65dc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/26252fff77cf4c609e06aeae626d65dc 2024-11-19T12:19:12,278 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a3b30e76f9264a1a98a59419127d4c28 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/a3b30e76f9264a1a98a59419127d4c28 2024-11-19T12:19:12,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e83808798e6b460f927c2ff8d559e4b1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e83808798e6b460f927c2ff8d559e4b1 2024-11-19T12:19:12,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/b857e27d3c004abd948af33460b9c6a1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/b857e27d3c004abd948af33460b9c6a1 2024-11-19T12:19:12,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3cc6c05ec2ec40f995f4e41c39c932d1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3cc6c05ec2ec40f995f4e41c39c932d1 2024-11-19T12:19:12,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/222f760e4dd645c2aa85f9efa1accf06 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/222f760e4dd645c2aa85f9efa1accf06 2024-11-19T12:19:12,282 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/ca291961c6354ff2a1c59a4165840429 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/ca291961c6354ff2a1c59a4165840429 2024-11-19T12:19:12,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/2dc2342dabfa41d0a22ba13f0f044883 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/2dc2342dabfa41d0a22ba13f0f044883 2024-11-19T12:19:12,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d3a0187d599c49efb30e70345ff0bcdc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d3a0187d599c49efb30e70345ff0bcdc 2024-11-19T12:19:12,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/dbc6e573332f48b4a91d8b463dbfc39f to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/dbc6e573332f48b4a91d8b463dbfc39f 2024-11-19T12:19:12,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3c2d5b63ef2c4bb281f5ec507407831a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3c2d5b63ef2c4bb281f5ec507407831a 2024-11-19T12:19:12,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9d1f7b29d5d44630a4b1a3be182ef103 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9d1f7b29d5d44630a4b1a3be182ef103 2024-11-19T12:19:12,287 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e03b1a1215f244ea8365d1893754b638 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/e03b1a1215f244ea8365d1893754b638 2024-11-19T12:19:12,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/f9da1c6f09b44dd6aea4dbdf9309d34e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/f9da1c6f09b44dd6aea4dbdf9309d34e 2024-11-19T12:19:12,289 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3725c9b3ac5a4844a71bea6c24cddd10 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/3725c9b3ac5a4844a71bea6c24cddd10 2024-11-19T12:19:12,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/727c7679914e4cc8998f3e372bf6cfe2 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/727c7679914e4cc8998f3e372bf6cfe2 2024-11-19T12:19:12,291 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9cdc378475ec46278f7397dd0688fd00 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/9cdc378475ec46278f7397dd0688fd00 2024-11-19T12:19:12,292 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/1862b28e6757465c91231875d26c28ed to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/1862b28e6757465c91231875d26c28ed 2024-11-19T12:19:12,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/518fdfe39e314cff8dd6f5eff3f24111 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/518fdfe39e314cff8dd6f5eff3f24111 2024-11-19T12:19:12,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/7f9c3c5b3b7f4c369684a3cbd14c9774 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/7f9c3c5b3b7f4c369684a3cbd14c9774 2024-11-19T12:19:12,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/08074043c0704986868d8d6ea7b76992 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/08074043c0704986868d8d6ea7b76992 2024-11-19T12:19:12,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8d8346d6baf3409f9374942edc682c08 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/8d8346d6baf3409f9374942edc682c08 2024-11-19T12:19:12,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/eb83648fd2f0482b83b2afac0b3972c2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6080bde75fb74d8281eee21d6e14f706, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7d6bbece50ca4ccdad3e552481e87906, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d01ce41f908d4e6888725f5e0852e1bb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/41f43fae60c0489db311d80885a3c0fb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b40fba99f9a34d00ac37fc9388f2aecf, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/f7e2ed7efdc4447ba6acc37a734fc7d1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2111cdd4c2e47b18923412b70e81cc4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/5c35a58fc5f1487795bc625053d951ae, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/0e806b8f0e5a4e5e9fed7f31b7b5ae7f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc76864587b48ff8495ab8acefceb84, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/fd4442828b67436f91f584de28f76dd1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/df3471727c5f425d9546401f69b6340f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/04801c5e17c64a38a13008cb06426458, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/95c570cbbb7644cc83fb2a545c07ec4a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/c0b2b1ad6894416aa28da41086961777, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7ce0a19b2d6047b29ccc33b6ed561e67, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2c8cacd0c324d129684b50c21370843, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/dacbf67dbb464c8894f2d535f40d6a30, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/c4e24bfe69fd4e02889532378bd87c5d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/a789f7a28afd466eae12e284012a89d9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7a4303dfcf7343a892caf42c5f54cc17, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/629904369a004819b218f4dd16746a30, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/673078dbc32e4b02972fc68ab0dfe2bb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b372f608163a42f7baf85e3d2d024c3f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7e68c05f6f1a4cbebbd3000bbbe7fff5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b64f94f7990a4490a40ee2c55f8bf8d7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7176dafa84624e5da20640105ae65009, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/ffd7cc1ab52644ee9c7a144e9753cd8b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6237fa9ecb8a43a5a7a9ddcbe0a2008c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc1c5570f74404abb12b6ab035735d4] to archive 2024-11-19T12:19:12,297 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
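The HFileArchiver entries in this section all follow one fixed pattern: each compacted store file is moved from the region's data directory to the mirrored location under archive/. Below is a minimal sketch of that path mapping, assuming only the layout visible in these log lines; the class and helper names are illustrative, not HBase's actual API.

// Minimal sketch of the data -> archive path mapping visible in the
// HFileArchiver log lines in this section; the helper name and the
// string-based rewrite are illustrative assumptions, not HBase code.
public final class ArchivePathSketch {

    // Rewrites <root>/data/default/... to <root>/archive/data/default/...
    static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/default/");
        if (idx < 0) {
            throw new IllegalArgumentException("not a store file path: " + storeFilePath);
        }
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
    }

    public static void main(String[] args) {
        String src = "hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22"
                + "/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/b857e27d3c004abd948af33460b9c6a1";
        // Prints the same archive location that the corresponding log line reports.
        System.out.println(toArchivePath(src));
    }
}
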
2024-11-19T12:19:12,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/eb83648fd2f0482b83b2afac0b3972c2 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/eb83648fd2f0482b83b2afac0b3972c2 2024-11-19T12:19:12,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6080bde75fb74d8281eee21d6e14f706 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6080bde75fb74d8281eee21d6e14f706 2024-11-19T12:19:12,300 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7d6bbece50ca4ccdad3e552481e87906 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7d6bbece50ca4ccdad3e552481e87906 2024-11-19T12:19:12,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d01ce41f908d4e6888725f5e0852e1bb to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d01ce41f908d4e6888725f5e0852e1bb 2024-11-19T12:19:12,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/41f43fae60c0489db311d80885a3c0fb to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/41f43fae60c0489db311d80885a3c0fb 2024-11-19T12:19:12,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b40fba99f9a34d00ac37fc9388f2aecf to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b40fba99f9a34d00ac37fc9388f2aecf 2024-11-19T12:19:12,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/f7e2ed7efdc4447ba6acc37a734fc7d1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/f7e2ed7efdc4447ba6acc37a734fc7d1 2024-11-19T12:19:12,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2111cdd4c2e47b18923412b70e81cc4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2111cdd4c2e47b18923412b70e81cc4 2024-11-19T12:19:12,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/5c35a58fc5f1487795bc625053d951ae to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/5c35a58fc5f1487795bc625053d951ae 2024-11-19T12:19:12,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/0e806b8f0e5a4e5e9fed7f31b7b5ae7f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/0e806b8f0e5a4e5e9fed7f31b7b5ae7f 2024-11-19T12:19:12,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc76864587b48ff8495ab8acefceb84 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc76864587b48ff8495ab8acefceb84 2024-11-19T12:19:12,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/fd4442828b67436f91f584de28f76dd1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/fd4442828b67436f91f584de28f76dd1 2024-11-19T12:19:12,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/df3471727c5f425d9546401f69b6340f to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/df3471727c5f425d9546401f69b6340f 2024-11-19T12:19:12,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/04801c5e17c64a38a13008cb06426458 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/04801c5e17c64a38a13008cb06426458 2024-11-19T12:19:12,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/95c570cbbb7644cc83fb2a545c07ec4a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/95c570cbbb7644cc83fb2a545c07ec4a 2024-11-19T12:19:12,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/c0b2b1ad6894416aa28da41086961777 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/c0b2b1ad6894416aa28da41086961777 2024-11-19T12:19:12,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7ce0a19b2d6047b29ccc33b6ed561e67 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7ce0a19b2d6047b29ccc33b6ed561e67 2024-11-19T12:19:12,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2c8cacd0c324d129684b50c21370843 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/d2c8cacd0c324d129684b50c21370843 2024-11-19T12:19:12,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/dacbf67dbb464c8894f2d535f40d6a30 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/dacbf67dbb464c8894f2d535f40d6a30 2024-11-19T12:19:12,314 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/c4e24bfe69fd4e02889532378bd87c5d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/c4e24bfe69fd4e02889532378bd87c5d 2024-11-19T12:19:12,315 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/a789f7a28afd466eae12e284012a89d9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/a789f7a28afd466eae12e284012a89d9 2024-11-19T12:19:12,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7a4303dfcf7343a892caf42c5f54cc17 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7a4303dfcf7343a892caf42c5f54cc17 2024-11-19T12:19:12,317 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/629904369a004819b218f4dd16746a30 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/629904369a004819b218f4dd16746a30 2024-11-19T12:19:12,318 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/673078dbc32e4b02972fc68ab0dfe2bb to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/673078dbc32e4b02972fc68ab0dfe2bb 2024-11-19T12:19:12,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b372f608163a42f7baf85e3d2d024c3f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b372f608163a42f7baf85e3d2d024c3f 2024-11-19T12:19:12,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7e68c05f6f1a4cbebbd3000bbbe7fff5 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7e68c05f6f1a4cbebbd3000bbbe7fff5 2024-11-19T12:19:12,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b64f94f7990a4490a40ee2c55f8bf8d7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/b64f94f7990a4490a40ee2c55f8bf8d7 2024-11-19T12:19:12,321 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7176dafa84624e5da20640105ae65009 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/7176dafa84624e5da20640105ae65009 2024-11-19T12:19:12,322 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/ffd7cc1ab52644ee9c7a144e9753cd8b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/ffd7cc1ab52644ee9c7a144e9753cd8b 2024-11-19T12:19:12,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6237fa9ecb8a43a5a7a9ddcbe0a2008c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/6237fa9ecb8a43a5a7a9ddcbe0a2008c 2024-11-19T12:19:12,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc1c5570f74404abb12b6ab035735d4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/9dc1c5570f74404abb12b6ab035735d4 2024-11-19T12:19:12,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/217be640898e48efaf854b16147a7dc3, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/2fff2f7d0fa044d8b789f47203baefef, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/5b202ef1c67e485b9b514e46ed91b49c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/69e71de2a48b4ef1b1787e437207b440, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a814c4b09352487c8b0b98e946080102, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ae3c4a7cca2e4142b423faa74c8ab7b0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/92383f58e9574d4686a5124a6bd43e87, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/26845c6a9b97444ab150123dd3c84916, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/9957635465fe459c96591fec0f9232bf, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/84b1ee01cd724e3b9877116a6a68cb98, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/cbc81d61948a4227a55ae32e77ba8546, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/137c3d9cfead4546ad30e27fb0551de9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/dd0ea65f0ef04b4db4aabff00e73cd3f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/d5685b91e2824bdd97e08e0583cf139f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/bb700aff6a21410b848fe81108f9048a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/c316f8f3217647ca9aebf1ff0d148c8c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b021928e8e5d430eab156a0da5642897, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b89d45c741c44177b96a388d3dc98577, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a16090f7bbad4444a4cf7b488ffd7222, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/f3c8c84209a44c06ba2b521044a96274, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/6e071bc6a2754e88b7d8e015fcaae0d4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/224d7594470e468393bb3f5cc069aaf0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/78df54c65ca14b2088d6b7be6a639c50, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/eef711bd3a6a491b94134f65e1e80bbf, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/5bedab36ca754f32b577f25c5f96f2d5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/780b7953f1b54c619d437011dfd11e16, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a3b4457d5c0a421780885a43babee89d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/2d83a086c8c04dbe951449458dfea1f8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/472a78b3041245c99a12897e7047623a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/647eeaad32954743832e5f96d166dd51, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/76b74a814b0c444286eda99ff18dc4eb] to archive 2024-11-19T12:19:12,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
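The C-family files listed above are archived in the entries that follow, ending up under archive/data/default/TestAcidGuarantees/<region>/<family>/. A hedged existence check for one of them, using the standard Hadoop FileSystem API with the filesystem URI and archive path taken from these log lines; this sketch is not part of the test itself.

// Hedged existence check (not part of TestAcidGuarantees) for one archived
// store file named in this section, via the standard Hadoop FileSystem API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ArchiveCheckSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address as it appears in the log lines of this run.
        conf.set("fs.defaultFS", "hdfs://localhost:46379");
        try (FileSystem fs = FileSystem.get(conf)) {
            Path archived = new Path("/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22"
                    + "/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94"
                    + "/C/217be640898e48efaf854b16147a7dc3");
            System.out.println("archived present: " + fs.exists(archived));
        }
    }
}
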
2024-11-19T12:19:12,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/217be640898e48efaf854b16147a7dc3 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/217be640898e48efaf854b16147a7dc3 2024-11-19T12:19:12,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/2fff2f7d0fa044d8b789f47203baefef to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/2fff2f7d0fa044d8b789f47203baefef 2024-11-19T12:19:12,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/5b202ef1c67e485b9b514e46ed91b49c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/5b202ef1c67e485b9b514e46ed91b49c 2024-11-19T12:19:12,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/69e71de2a48b4ef1b1787e437207b440 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/69e71de2a48b4ef1b1787e437207b440 2024-11-19T12:19:12,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a814c4b09352487c8b0b98e946080102 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a814c4b09352487c8b0b98e946080102 2024-11-19T12:19:12,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ae3c4a7cca2e4142b423faa74c8ab7b0 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ae3c4a7cca2e4142b423faa74c8ab7b0 2024-11-19T12:19:12,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/92383f58e9574d4686a5124a6bd43e87 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/92383f58e9574d4686a5124a6bd43e87 2024-11-19T12:19:12,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/26845c6a9b97444ab150123dd3c84916 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/26845c6a9b97444ab150123dd3c84916 2024-11-19T12:19:12,334 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/9957635465fe459c96591fec0f9232bf to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/9957635465fe459c96591fec0f9232bf 2024-11-19T12:19:12,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/84b1ee01cd724e3b9877116a6a68cb98 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/84b1ee01cd724e3b9877116a6a68cb98 2024-11-19T12:19:12,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/cbc81d61948a4227a55ae32e77ba8546 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/cbc81d61948a4227a55ae32e77ba8546 2024-11-19T12:19:12,337 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/137c3d9cfead4546ad30e27fb0551de9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/137c3d9cfead4546ad30e27fb0551de9 2024-11-19T12:19:12,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/dd0ea65f0ef04b4db4aabff00e73cd3f to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/dd0ea65f0ef04b4db4aabff00e73cd3f 2024-11-19T12:19:12,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/d5685b91e2824bdd97e08e0583cf139f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/d5685b91e2824bdd97e08e0583cf139f 2024-11-19T12:19:12,339 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/bb700aff6a21410b848fe81108f9048a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/bb700aff6a21410b848fe81108f9048a 2024-11-19T12:19:12,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/c316f8f3217647ca9aebf1ff0d148c8c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/c316f8f3217647ca9aebf1ff0d148c8c 2024-11-19T12:19:12,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b021928e8e5d430eab156a0da5642897 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b021928e8e5d430eab156a0da5642897 2024-11-19T12:19:12,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b89d45c741c44177b96a388d3dc98577 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/b89d45c741c44177b96a388d3dc98577 2024-11-19T12:19:12,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a16090f7bbad4444a4cf7b488ffd7222 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a16090f7bbad4444a4cf7b488ffd7222 2024-11-19T12:19:12,344 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/f3c8c84209a44c06ba2b521044a96274 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/f3c8c84209a44c06ba2b521044a96274 2024-11-19T12:19:12,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/6e071bc6a2754e88b7d8e015fcaae0d4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/6e071bc6a2754e88b7d8e015fcaae0d4 2024-11-19T12:19:12,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/224d7594470e468393bb3f5cc069aaf0 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/224d7594470e468393bb3f5cc069aaf0 2024-11-19T12:19:12,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/78df54c65ca14b2088d6b7be6a639c50 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/78df54c65ca14b2088d6b7be6a639c50 2024-11-19T12:19:12,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/eef711bd3a6a491b94134f65e1e80bbf to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/eef711bd3a6a491b94134f65e1e80bbf 2024-11-19T12:19:12,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/5bedab36ca754f32b577f25c5f96f2d5 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/5bedab36ca754f32b577f25c5f96f2d5 2024-11-19T12:19:12,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/780b7953f1b54c619d437011dfd11e16 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/780b7953f1b54c619d437011dfd11e16 2024-11-19T12:19:12,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a3b4457d5c0a421780885a43babee89d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/a3b4457d5c0a421780885a43babee89d 2024-11-19T12:19:12,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/2d83a086c8c04dbe951449458dfea1f8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/2d83a086c8c04dbe951449458dfea1f8 2024-11-19T12:19:12,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/472a78b3041245c99a12897e7047623a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/472a78b3041245c99a12897e7047623a 2024-11-19T12:19:12,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/647eeaad32954743832e5f96d166dd51 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/647eeaad32954743832e5f96d166dd51 2024-11-19T12:19:12,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/76b74a814b0c444286eda99ff18dc4eb to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/76b74a814b0c444286eda99ff18dc4eb 2024-11-19T12:19:12,357 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/recovered.edits/441.seqid, newMaxSeqId=441, maxSeqId=1 2024-11-19T12:19:12,358 INFO 
[RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94. 2024-11-19T12:19:12,358 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for c974ed334ef63a1e045d42c6ff516b94: 2024-11-19T12:19:12,359 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:19:12,359 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=c974ed334ef63a1e045d42c6ff516b94, regionState=CLOSED 2024-11-19T12:19:12,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-19T12:19:12,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure c974ed334ef63a1e045d42c6ff516b94, server=af314c41f984,36047,1732018661455 in 1.4890 sec 2024-11-19T12:19:12,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-19T12:19:12,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c974ed334ef63a1e045d42c6ff516b94, UNASSIGN in 1.4920 sec 2024-11-19T12:19:12,363 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-19T12:19:12,363 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4930 sec 2024-11-19T12:19:12,364 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018752364"}]},"ts":"1732018752364"} 2024-11-19T12:19:12,365 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-19T12:19:12,367 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-19T12:19:12,368 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5040 sec 2024-11-19T12:19:12,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-19T12:19:12,969 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-19T12:19:12,969 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-19T12:19:12,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:12,971 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:12,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-19T12:19:12,971 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=97, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:12,972 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:19:12,974 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/recovered.edits] 2024-11-19T12:19:12,976 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/0353df5a5d1d4d779eb7c70c03a9927f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/0353df5a5d1d4d779eb7c70c03a9927f 2024-11-19T12:19:12,977 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/5b69ec49a369420e90f1e280607aae1c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/5b69ec49a369420e90f1e280607aae1c 2024-11-19T12:19:12,978 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d3fa901618c84de380ed4b1f6401d713 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/A/d3fa901618c84de380ed4b1f6401d713 2024-11-19T12:19:12,980 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/0598f5ee59304501aae69f6800618a62 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/0598f5ee59304501aae69f6800618a62 2024-11-19T12:19:12,981 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/1bb5b307da3d4ae485b2f2df7309bc91 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/1bb5b307da3d4ae485b2f2df7309bc91 2024-11-19T12:19:12,982 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/daf8e40f4820433ca18b2a0fb08fae4a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/B/daf8e40f4820433ca18b2a0fb08fae4a 2024-11-19T12:19:12,983 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ac1641a9307f4505ae43929732b1103b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ac1641a9307f4505ae43929732b1103b 2024-11-19T12:19:12,984 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ddf8de5dc2154252bc8a643f6c2dbd61 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ddf8de5dc2154252bc8a643f6c2dbd61 2024-11-19T12:19:12,985 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ea8a590881d94b79bc4355efa00a01fa to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/C/ea8a590881d94b79bc4355efa00a01fa 2024-11-19T12:19:12,987 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/recovered.edits/441.seqid to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94/recovered.edits/441.seqid 2024-11-19T12:19:12,988 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c974ed334ef63a1e045d42c6ff516b94 2024-11-19T12:19:12,988 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-19T12:19:12,989 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=97, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:12,993 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-19T12:19:12,995 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
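Annotation: the entries above trace DisableTableProcedure (pid=93) and DeleteTableProcedure (pid=97) archiving the region's store files under .../archive before the table is removed from hbase:meta. For orientation, a minimal client-side sketch of the teardown that drives these procedures, assuming an already-open HBase Connection named conn; class and method names are illustrative only.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

final class DropAcidTable {
  static void drop(Connection conn) throws IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);  // DisableTableProcedure (pid=93 above)
        }
        // DeleteTableProcedure (pid=97): store files are moved to .../archive, not destroyed
        admin.deleteTable(table);
      }
    }
  }
}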
2024-11-19T12:19:12,995 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=97, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:12,995 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-19T12:19:12,996 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732018752995"}]},"ts":"9223372036854775807"} 2024-11-19T12:19:12,997 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-19T12:19:12,997 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c974ed334ef63a1e045d42c6ff516b94, NAME => 'TestAcidGuarantees,,1732018722726.c974ed334ef63a1e045d42c6ff516b94.', STARTKEY => '', ENDKEY => ''}] 2024-11-19T12:19:12,997 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-19T12:19:12,997 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732018752997"}]},"ts":"9223372036854775807"} 2024-11-19T12:19:12,998 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-19T12:19:13,002 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=97, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:13,002 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 33 msec 2024-11-19T12:19:13,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-19T12:19:13,072 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-11-19T12:19:13,081 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=237 (was 240), OpenFileDescriptor=453 (was 456), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=454 (was 546), ProcessCount=11 (was 11), AvailableMemoryMB=2314 (was 2687) 2024-11-19T12:19:13,089 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=454, ProcessCount=11, AvailableMemoryMB=2314 2024-11-19T12:19:13,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
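Annotation: the create request logged just below re-creates TestAcidGuarantees with families A, B and C (one version each) and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'; the MEMSTORE_FLUSHSIZE warning above reflects the small flush size (131072) apparently set in the test's configuration, not in this descriptor. A minimal sketch of an equivalent create, again assuming an open Connection conn:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class CreateAcidTable {
  static void create(Connection conn) throws IOException {
    TableDescriptorBuilder tdb = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // matches TABLE_ATTRIBUTES => METADATA 'hbase.hregion.compacting.memstore.type' => 'BASIC'
        .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
    for (String family : new String[] { "A", "B", "C" }) {
      tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)  // VERSIONS => '1'
          .build());
    }
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(tdb.build());  // drives CreateTableProcedure (pid=98 below)
    }
  }
}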
2024-11-19T12:19:13,090 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:19:13,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:13,092 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:19:13,092 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:13,092 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 98 2024-11-19T12:19:13,092 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:19:13,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-19T12:19:13,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742183_1359 (size=960) 2024-11-19T12:19:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-19T12:19:13,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-19T12:19:13,499 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22 2024-11-19T12:19:13,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742184_1360 (size=53) 2024-11-19T12:19:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-19T12:19:13,904 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:19:13,904 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing c569d7fdb1587d224050261fa2ec2f58, disabling compactions & flushes 2024-11-19T12:19:13,904 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:13,905 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:13,905 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. after waiting 0 ms 2024-11-19T12:19:13,905 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:13,905 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
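Annotation: the repeated "Checking to see if procedure is done pid=98" entries are the client polling the master through the admin's table future while CreateTableProcedure runs. A sketch of the non-blocking form, assuming the single-argument createTableAsync overload of the 2.x Admin API; names are illustrative.

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

final class CreateAndWait {
  static void createAndWait(Admin admin, TableDescriptor descriptor) throws Exception {
    // The returned Future is what keeps asking "is procedure done pid=..." on the master.
    Future<Void> pending = admin.createTableAsync(descriptor);
    pending.get(5, TimeUnit.MINUTES);  // corresponds to HBaseAdmin$TableFuture in the log
  }
}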
2024-11-19T12:19:13,905 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:13,905 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:19:13,906 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732018753905"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732018753905"}]},"ts":"1732018753905"} 2024-11-19T12:19:13,907 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-19T12:19:13,907 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:19:13,907 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018753907"}]},"ts":"1732018753907"} 2024-11-19T12:19:13,908 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-19T12:19:13,912 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c569d7fdb1587d224050261fa2ec2f58, ASSIGN}] 2024-11-19T12:19:13,913 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c569d7fdb1587d224050261fa2ec2f58, ASSIGN 2024-11-19T12:19:13,913 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c569d7fdb1587d224050261fa2ec2f58, ASSIGN; state=OFFLINE, location=af314c41f984,36047,1732018661455; forceNewPlan=false, retain=false 2024-11-19T12:19:14,063 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=c569d7fdb1587d224050261fa2ec2f58, regionState=OPENING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:19:14,065 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; OpenRegionProcedure c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:19:14,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-19T12:19:14,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:14,219 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
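Annotation: the assignment entries above record hbase:meta being updated with regionState=OPENING and regionLocation=af314c41f984,36047,... for the new region. Once the open completes (a few entries further down), a client can read the same assignment back through a RegionLocator; a minimal sketch, with conn assumed open:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class ShowAssignments {
  static void show(Connection conn) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        // Prints the encoded region name and the RegionServer recorded in hbase:meta.
        System.out.println(location.getRegion().getEncodedName() + " -> " + location.getServerName());
      }
    }
  }
}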
2024-11-19T12:19:14,219 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7285): Opening region: {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:19:14,219 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:14,219 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:19:14,219 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7327): checking encryption for c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:14,220 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7330): checking classloading for c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:14,221 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:14,222 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:19:14,222 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c569d7fdb1587d224050261fa2ec2f58 columnFamilyName A 2024-11-19T12:19:14,222 DEBUG [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:14,222 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(327): Store=c569d7fdb1587d224050261fa2ec2f58/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:19:14,223 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:14,223 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:19:14,224 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c569d7fdb1587d224050261fa2ec2f58 columnFamilyName B 2024-11-19T12:19:14,224 DEBUG [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:14,224 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(327): Store=c569d7fdb1587d224050261fa2ec2f58/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:19:14,224 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:14,225 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:19:14,225 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c569d7fdb1587d224050261fa2ec2f58 columnFamilyName C 2024-11-19T12:19:14,225 DEBUG [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:14,225 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(327): Store=c569d7fdb1587d224050261fa2ec2f58/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:19:14,225 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:14,226 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:14,226 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:14,227 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:19:14,228 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1085): writing seq id for c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:14,230 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:19:14,230 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1102): Opened c569d7fdb1587d224050261fa2ec2f58; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59302417, jitterRate=-0.11632512509822845}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:19:14,231 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1001): Region open journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:14,231 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., pid=100, masterSystemTime=1732018754216 2024-11-19T12:19:14,232 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:14,232 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
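Annotation: with the region open (next sequenceid=2 above), the test's writers mutate all three families of a row in a single Put; HBase applies single-row mutations atomically across column families, which is the property TestAcidGuarantees exercises. A sketch of such a write, with hypothetical row key, qualifier and value, and conn assumed open:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class AtomicRowWrite {
  static void write(Connection conn) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      byte[] row = Bytes.toBytes("test_row_0");
      byte[] qualifier = Bytes.toBytes("col0");
      byte[] value = Bytes.toBytes(42L);
      // One Put spanning families A, B and C: applied atomically at the row level.
      Put put = new Put(row);
      put.addColumn(Bytes.toBytes("A"), qualifier, value);
      put.addColumn(Bytes.toBytes("B"), qualifier, value);
      put.addColumn(Bytes.toBytes("C"), qualifier, value);
      table.put(put);
    }
  }
}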
2024-11-19T12:19:14,233 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=c569d7fdb1587d224050261fa2ec2f58, regionState=OPEN, openSeqNum=2, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:19:14,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-19T12:19:14,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; OpenRegionProcedure c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 in 169 msec 2024-11-19T12:19:14,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-19T12:19:14,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c569d7fdb1587d224050261fa2ec2f58, ASSIGN in 323 msec 2024-11-19T12:19:14,236 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:19:14,236 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018754236"}]},"ts":"1732018754236"} 2024-11-19T12:19:14,237 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-19T12:19:14,239 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:19:14,241 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1490 sec 2024-11-19T12:19:15,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-19T12:19:15,196 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-11-19T12:19:15,197 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7267b857 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6b8b5c25 2024-11-19T12:19:15,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c8cc27b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:15,202 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:15,203 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:15,204 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:19:15,205 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38034, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:19:15,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-19T12:19:15,206 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:19:15,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:15,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742185_1361 (size=996) 2024-11-19T12:19:15,618 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-19T12:19:15,618 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-19T12:19:15,620 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-19T12:19:15,621 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c569d7fdb1587d224050261fa2ec2f58, REOPEN/MOVE}] 2024-11-19T12:19:15,622 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c569d7fdb1587d224050261fa2ec2f58, REOPEN/MOVE 2024-11-19T12:19:15,622 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=c569d7fdb1587d224050261fa2ec2f58, regionState=CLOSING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:19:15,623 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-19T12:19:15,623 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; CloseRegionProcedure c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:19:15,775 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:15,775 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(124): Close c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:15,775 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-19T12:19:15,775 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1681): Closing c569d7fdb1587d224050261fa2ec2f58, disabling compactions & flushes 2024-11-19T12:19:15,775 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:15,775 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:15,775 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. after waiting 0 ms 2024-11-19T12:19:15,775 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
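Annotation: the modify request above changes family A to IS_MOB => 'true' with MOB_THRESHOLD => '4', after which ModifyTableProcedure reopens the region. The test issues a full modifyTable; a shorter way to express the same family change from a client, assuming the 2.x Admin.modifyColumnFamily call and an open Connection conn:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Bytes;

final class EnableMobOnA {
  static void enableMob(Connection conn) throws IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor current = admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(current)
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(4L)   // cells larger than 4 bytes are written as MOB files
          .build();
      admin.modifyColumnFamily(table, mobA);  // triggers the reopen churn logged around here
    }
  }
}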
2024-11-19T12:19:15,779 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-19T12:19:15,779 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:15,779 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1635): Region close journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:15,779 WARN [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionServer(3786): Not adding moved region record: c569d7fdb1587d224050261fa2ec2f58 to self. 2024-11-19T12:19:15,780 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(170): Closed c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:15,781 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=c569d7fdb1587d224050261fa2ec2f58, regionState=CLOSED 2024-11-19T12:19:15,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-11-19T12:19:15,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseRegionProcedure c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 in 158 msec 2024-11-19T12:19:15,783 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c569d7fdb1587d224050261fa2ec2f58, REOPEN/MOVE; state=CLOSED, location=af314c41f984,36047,1732018661455; forceNewPlan=false, retain=true 2024-11-19T12:19:15,933 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=c569d7fdb1587d224050261fa2ec2f58, regionState=OPENING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:19:15,934 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=103, state=RUNNABLE; OpenRegionProcedure c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:19:16,086 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:16,088 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
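Annotation: the reassignment above runs with retain=true, so the region is expected to reopen on the same RegionServer (af314c41f984,36047,1732018661455). Once the reopen below finishes, a client could spot-check that; a sketch with the server name copied from the log and illustrative class/method names:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;

final class CheckRetainedServer {
  static void check(Connection conn) throws IOException {
    ServerName server = ServerName.valueOf("af314c41f984,36047,1732018661455");
    try (Admin admin = conn.getAdmin()) {
      List<RegionInfo> hosted = admin.getRegions(server);
      hosted.stream()
          .filter(region -> region.getTable().getNameAsString().equals("TestAcidGuarantees"))
          .forEach(region -> System.out.println("hosted on retained server: " + region.getEncodedName()));
    }
  }
}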
2024-11-19T12:19:16,088 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7285): Opening region: {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:19:16,089 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:16,089 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:19:16,089 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7327): checking encryption for c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:16,089 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7330): checking classloading for c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:16,090 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:16,091 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:19:16,091 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c569d7fdb1587d224050261fa2ec2f58 columnFamilyName A 2024-11-19T12:19:16,092 DEBUG [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:16,092 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(327): Store=c569d7fdb1587d224050261fa2ec2f58/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:19:16,093 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:16,093 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:19:16,094 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c569d7fdb1587d224050261fa2ec2f58 columnFamilyName B 2024-11-19T12:19:16,094 DEBUG [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:16,094 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(327): Store=c569d7fdb1587d224050261fa2ec2f58/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:19:16,094 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:16,094 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:19:16,095 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c569d7fdb1587d224050261fa2ec2f58 columnFamilyName C 2024-11-19T12:19:16,095 DEBUG [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:16,095 INFO [StoreOpener-c569d7fdb1587d224050261fa2ec2f58-1 {}] regionserver.HStore(327): Store=c569d7fdb1587d224050261fa2ec2f58/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:19:16,095 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,096 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:16,096 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:16,097 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:19:16,098 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1085): writing seq id for c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:16,098 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1102): Opened c569d7fdb1587d224050261fa2ec2f58; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66326204, jitterRate=-0.011662542819976807}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:19:16,099 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1001): Region open journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:16,100 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., pid=105, masterSystemTime=1732018756085 2024-11-19T12:19:16,101 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,101 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
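
Annotation: the "using region.getMemStoreFlushHeapSize/# of families (16.0 M)" fallback above is logged because the TestAcidGuarantees table descriptor does not set hbase.hregion.percolumnfamilyflush.size.lower.bound. A minimal sketch of setting that property explicitly on the table descriptor, assuming an HBase 2.x client on the classpath and a reachable cluster; the class name and the 16 MB value are illustrative, not taken from the test:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SetPerFamilyFlushBound {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      TableDescriptor current = admin.getDescriptor(tn);
      // Set the per-column-family flush lower bound (bytes) in the table descriptor;
      // without it, FlushLargeStoresPolicy falls back to memstore flush size divided
      // by the number of families, which is the value reported in the log entry above.
      TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
          .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                    String.valueOf(16 * 1024 * 1024))
          .build();
      admin.modifyTable(updated);
    }
  }
}
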
2024-11-19T12:19:16,101 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=c569d7fdb1587d224050261fa2ec2f58, regionState=OPEN, openSeqNum=5, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,103 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=103 2024-11-19T12:19:16,103 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=103, state=SUCCESS; OpenRegionProcedure c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 in 168 msec 2024-11-19T12:19:16,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-19T12:19:16,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c569d7fdb1587d224050261fa2ec2f58, REOPEN/MOVE in 482 msec 2024-11-19T12:19:16,105 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-19T12:19:16,105 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 484 msec 2024-11-19T12:19:16,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 899 msec 2024-11-19T12:19:16,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-19T12:19:16,108 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x081cac4f to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@601038b3 2024-11-19T12:19:16,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@126abdf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:16,116 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64a04d7a to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59434fd 2024-11-19T12:19:16,119 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42d6bca6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:16,119 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3268230a to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167fda66 2024-11-19T12:19:16,122 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61bb7783, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:16,123 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d7912a0 
to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bd5983 2024-11-19T12:19:16,125 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f0031d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:16,126 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x54ed1e8a to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b7324d5 2024-11-19T12:19:16,128 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5434c92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:16,129 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d930fb1 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@52abed4d 2024-11-19T12:19:16,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d80c576, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:16,132 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x114e6211 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c0234f0 2024-11-19T12:19:16,134 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17a2e973, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:16,135 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x191ae36a to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14b2e10d 2024-11-19T12:19:16,139 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@145b6b99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:16,139 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x133cc1f0 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1085e013 2024-11-19T12:19:16,142 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fcd5639, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:16,142 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x6cfa4b91 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53c0ab65 2024-11-19T12:19:16,148 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@660943ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:16,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:16,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-19T12:19:16,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-19T12:19:16,152 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:16,152 DEBUG [hconnection-0x3f379e42-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:16,152 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:16,152 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:16,153 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44998, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:16,158 DEBUG [hconnection-0x43973fa6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:16,159 DEBUG [hconnection-0x2da11881-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:16,159 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:16,159 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45010, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:16,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:16,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:19:16,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, 
store=A 2024-11-19T12:19:16,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:16,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:16,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:16,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:16,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:16,172 DEBUG [hconnection-0x1cbe5ed1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:16,172 DEBUG [hconnection-0x6483d2d7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:16,174 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45030, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:16,174 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45028, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:16,174 DEBUG [hconnection-0x1a551c66-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:16,174 DEBUG [hconnection-0x5146b7b8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:16,176 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45040, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:16,176 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45054, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:16,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018816184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018816185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018816185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018816189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,194 DEBUG [hconnection-0x716e7210-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:16,194 DEBUG [hconnection-0x467e061f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:16,195 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45062, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:16,196 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:16,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45058 deadline: 1732018816197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,198 DEBUG [hconnection-0x76311231-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:16,199 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:16,202 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411198b60c96dc9a44b7abc0455f48d18d338_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018756158/Put/seqid=0 2024-11-19T12:19:16,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742186_1362 (size=12154) 2024-11-19T12:19:16,232 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:16,235 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411198b60c96dc9a44b7abc0455f48d18d338_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411198b60c96dc9a44b7abc0455f48d18d338_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:16,236 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/57836298a7ee4019983eae641342b984, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:16,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/57836298a7ee4019983eae641342b984 is 175, key is test_row_0/A:col10/1732018756158/Put/seqid=0 2024-11-19T12:19:16,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added 
to blk_1073742187_1363 (size=30955) 2024-11-19T12:19:16,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-19T12:19:16,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018816286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018816286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018816286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018816290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,304 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:16,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45058 deadline: 1732018816299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-19T12:19:16,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:16,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,305 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
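
Annotation: the FlushTableProcedure/FlushRegionProcedure activity above (pid=106/107) was started by the client-side flush request logged earlier ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"); the remote callable fails here only because the region is already flushing. A minimal sketch of issuing the same table flush through the public Admin API, assuming an HBase 2.x client and default client configuration; the class name is illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    // Ask the master to flush the table; in this build the master drives it as a
    // FlushTableProcedure with a FlushRegionProcedure subprocedure per region,
    // matching the pid=106/107 entries in the log.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
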
2024-11-19T12:19:16,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:16,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:16,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-19T12:19:16,457 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:16,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-19T12:19:16,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:16,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,458 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:16,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:16,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:16,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018816488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018816489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018816489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018816492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45058 deadline: 1732018816506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,610 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:16,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-19T12:19:16,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:16,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:16,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:16,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:16,653 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/57836298a7ee4019983eae641342b984 2024-11-19T12:19:16,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/462934de600e44e998571a9a3f8a6434 is 50, key is test_row_0/B:col10/1732018756158/Put/seqid=0 2024-11-19T12:19:16,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742188_1364 (size=12001) 2024-11-19T12:19:16,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-19T12:19:16,763 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:16,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-19T12:19:16,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:16,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:16,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:16,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:16,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018816791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018816792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018816792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018816796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:16,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45058 deadline: 1732018816809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:16,916 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:16,916 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-19T12:19:16,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:16,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:16,917 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:16,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:16,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:17,069 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:17,069 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-19T12:19:17,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:17,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:17,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:17,069 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:17,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:17,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:17,092 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/462934de600e44e998571a9a3f8a6434 2024-11-19T12:19:17,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/486f542f214c4dfe852de1f65bb14c1e is 50, key is test_row_0/C:col10/1732018756158/Put/seqid=0 2024-11-19T12:19:17,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742189_1365 (size=12001) 2024-11-19T12:19:17,221 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:17,222 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-19T12:19:17,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:17,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:17,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:17,222 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:17,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:17,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:17,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-19T12:19:17,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:17,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018817296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:17,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:17,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018817297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:17,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:17,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018817300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:17,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:17,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018817302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:17,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:17,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45058 deadline: 1732018817313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:17,374 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:17,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-19T12:19:17,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:17,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:17,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:17,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:17,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:17,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:17,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/486f542f214c4dfe852de1f65bb14c1e 2024-11-19T12:19:17,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/57836298a7ee4019983eae641342b984 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/57836298a7ee4019983eae641342b984 2024-11-19T12:19:17,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/57836298a7ee4019983eae641342b984, entries=150, sequenceid=15, filesize=30.2 K 2024-11-19T12:19:17,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/462934de600e44e998571a9a3f8a6434 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/462934de600e44e998571a9a3f8a6434 2024-11-19T12:19:17,527 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:17,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-19T12:19:17,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:17,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:17,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:17,528 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:17,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:17,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:17,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/462934de600e44e998571a9a3f8a6434, entries=150, sequenceid=15, filesize=11.7 K 2024-11-19T12:19:17,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/486f542f214c4dfe852de1f65bb14c1e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/486f542f214c4dfe852de1f65bb14c1e 2024-11-19T12:19:17,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/486f542f214c4dfe852de1f65bb14c1e, entries=150, sequenceid=15, filesize=11.7 K 2024-11-19T12:19:17,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c569d7fdb1587d224050261fa2ec2f58 in 1370ms, sequenceid=15, compaction requested=false 2024-11-19T12:19:17,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:17,680 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:17,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-19T12:19:17,681 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:17,681 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-19T12:19:17,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:17,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:17,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:17,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:17,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:17,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:17,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119b8a27b26b4d1495c9f403966fec9e3ae_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018756183/Put/seqid=0 2024-11-19T12:19:17,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742190_1366 (size=12154) 2024-11-19T12:19:17,888 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-19T12:19:18,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:18,110 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119b8a27b26b4d1495c9f403966fec9e3ae_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119b8a27b26b4d1495c9f403966fec9e3ae_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:18,111 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/206a753b9262459f85fc5cff26a07e44, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:18,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/206a753b9262459f85fc5cff26a07e44 is 175, key is test_row_0/A:col10/1732018756183/Put/seqid=0 2024-11-19T12:19:18,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742191_1367 (size=30955) 2024-11-19T12:19:18,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-19T12:19:18,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:18,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:18,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018818307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018818310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018818310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018818310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45058 deadline: 1732018818318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018818411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018818413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018818414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018818414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,516 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/206a753b9262459f85fc5cff26a07e44 2024-11-19T12:19:18,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/57958f547f96421da277787ee7b8e9f4 is 50, key is test_row_0/B:col10/1732018756183/Put/seqid=0 2024-11-19T12:19:18,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742192_1368 (size=12001) 2024-11-19T12:19:18,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018818615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018818618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018818618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018818619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018818921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018818922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018818922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:18,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018818925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:18,928 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/57958f547f96421da277787ee7b8e9f4 2024-11-19T12:19:18,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/baaeabbdbf724fcb8fe7c5e25f434f4b is 50, key is test_row_0/C:col10/1732018756183/Put/seqid=0 2024-11-19T12:19:18,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742193_1369 (size=12001) 2024-11-19T12:19:18,941 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/baaeabbdbf724fcb8fe7c5e25f434f4b 2024-11-19T12:19:18,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/206a753b9262459f85fc5cff26a07e44 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/206a753b9262459f85fc5cff26a07e44 2024-11-19T12:19:18,949 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/206a753b9262459f85fc5cff26a07e44, entries=150, sequenceid=40, filesize=30.2 K 2024-11-19T12:19:18,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/57958f547f96421da277787ee7b8e9f4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/57958f547f96421da277787ee7b8e9f4 2024-11-19T12:19:18,954 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/57958f547f96421da277787ee7b8e9f4, entries=150, sequenceid=40, filesize=11.7 K 2024-11-19T12:19:18,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/baaeabbdbf724fcb8fe7c5e25f434f4b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/baaeabbdbf724fcb8fe7c5e25f434f4b 2024-11-19T12:19:18,957 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/baaeabbdbf724fcb8fe7c5e25f434f4b, entries=150, sequenceid=40, filesize=11.7 K 2024-11-19T12:19:18,958 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for c569d7fdb1587d224050261fa2ec2f58 in 1277ms, sequenceid=40, compaction requested=false 2024-11-19T12:19:18,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:18,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
2024-11-19T12:19:18,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-19T12:19:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-19T12:19:18,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-19T12:19:18,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8080 sec 2024-11-19T12:19:18,963 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.8110 sec 2024-11-19T12:19:19,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:19,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-19T12:19:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:19,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:19,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119a0ca8de898234ea999037fb27329e998_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018758308/Put/seqid=0 2024-11-19T12:19:19,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742194_1370 (size=17034) 2024-11-19T12:19:19,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018819458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018819459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018819460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018819461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018819565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018819565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018819569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018819569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018819769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018819769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018819774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:19,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018819775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:19,844 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:19,848 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119a0ca8de898234ea999037fb27329e998_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119a0ca8de898234ea999037fb27329e998_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:19,849 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/17769644803f4f2bb8b6c00edd862b7b, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:19,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/17769644803f4f2bb8b6c00edd862b7b is 175, key is test_row_0/A:col10/1732018758308/Put/seqid=0 2024-11-19T12:19:19,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742195_1371 (size=48139) 2024-11-19T12:19:20,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:20,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018820074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:20,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:20,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018820074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:20,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:20,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018820079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:20,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:20,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018820079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:20,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-19T12:19:20,257 INFO [Thread-1634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-19T12:19:20,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:20,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-19T12:19:20,260 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/17769644803f4f2bb8b6c00edd862b7b 2024-11-19T12:19:20,263 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:20,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-19T12:19:20,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/bc53b04783034c86a538b561ce48467f is 50, key is test_row_0/B:col10/1732018758308/Put/seqid=0 2024-11-19T12:19:20,272 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:20,272 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:20,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742196_1372 (size=12001) 
2024-11-19T12:19:20,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/bc53b04783034c86a538b561ce48467f 2024-11-19T12:19:20,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/cff0ae6a20c746b6b0d77027fbaf4763 is 50, key is test_row_0/C:col10/1732018758308/Put/seqid=0 2024-11-19T12:19:20,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742197_1373 (size=12001) 2024-11-19T12:19:20,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:20,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45058 deadline: 1732018820336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:20,339 DEBUG [Thread-1628 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., hostname=af314c41f984,36047,1732018661455, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:19:20,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-19T12:19:20,424 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:20,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-19T12:19:20,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 
{event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:20,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:20,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:20,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-19T12:19:20,576 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:20,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-19T12:19:20,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
2024-11-19T12:19:20,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:20,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:20,577 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:20,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:20,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=109
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:19:20,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:19:20,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018820582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455
2024-11-19T12:19:20,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:19:20,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018820583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455
2024-11-19T12:19:20,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:19:20,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018820585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455
2024-11-19T12:19:20,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:19:20,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018820587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455
2024-11-19T12:19:20,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/cff0ae6a20c746b6b0d77027fbaf4763
2024-11-19T12:19:20,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/17769644803f4f2bb8b6c00edd862b7b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/17769644803f4f2bb8b6c00edd862b7b
2024-11-19T12:19:20,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/17769644803f4f2bb8b6c00edd862b7b, entries=250, sequenceid=54, filesize=47.0 K
2024-11-19T12:19:20,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/bc53b04783034c86a538b561ce48467f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bc53b04783034c86a538b561ce48467f
2024-11-19T12:19:20,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bc53b04783034c86a538b561ce48467f, entries=150, sequenceid=54, filesize=11.7 K
2024-11-19T12:19:20,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/cff0ae6a20c746b6b0d77027fbaf4763 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/cff0ae6a20c746b6b0d77027fbaf4763
2024-11-19T12:19:20,702 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/cff0ae6a20c746b6b0d77027fbaf4763, entries=150, sequenceid=54, filesize=11.7 K
2024-11-19T12:19:20,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for c569d7fdb1587d224050261fa2ec2f58 in 1274ms, sequenceid=54, compaction requested=true
2024-11-19T12:19:20,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58:
2024-11-19T12:19:20,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:A, priority=-2147483648, current under compaction store size is 1
2024-11-19T12:19:20,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:19:20,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:B, priority=-2147483648, current under compaction store size is 2
2024-11-19T12:19:20,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:19:20,703 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T12:19:20,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:C, priority=-2147483648, current under compaction store size is 3
2024-11-19T12:19:20,703 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T12:19:20,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-19T12:19:20,704 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T12:19:20,704 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110049 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T12:19:20,704 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/A is initiating minor compaction (all files)
2024-11-19T12:19:20,704 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/B is initiating minor compaction (all files)
2024-11-19T12:19:20,704 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/A in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.
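The RegionTooBusyException entries above show the region server rejecting writes while the region is over its memstore limit (512.0 K here); the writes go through again once the flush that follows has drained the memstore. The standard HBase client already retries this exception internally, but a caller driving puts by hand can back off explicitly. The sketch below is only an illustration: the table name, row, and column family are taken from the log, while the retry count and sleep interval are arbitrary values, not anything used by this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Back off and retry while the region reports "Over memstore limit".
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                       // give up after a few attempts (arbitrary cutoff)
          }
          Thread.sleep(100L * attempt);    // simple linear backoff (arbitrary interval)
        }
      }
    }
  }
}
```

Depending on client configuration the exception may also surface wrapped inside the client's own retry-exhausted error, so the explicit catch above is illustrative rather than the only place it can appear.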
2024-11-19T12:19:20,704 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/B in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.
2024-11-19T12:19:20,704 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/57836298a7ee4019983eae641342b984, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/206a753b9262459f85fc5cff26a07e44, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/17769644803f4f2bb8b6c00edd862b7b] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=107.5 K
2024-11-19T12:19:20,704 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/462934de600e44e998571a9a3f8a6434, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/57958f547f96421da277787ee7b8e9f4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bc53b04783034c86a538b561ce48467f] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=35.2 K
2024-11-19T12:19:20,704 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.
2024-11-19T12:19:20,704 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/57836298a7ee4019983eae641342b984, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/206a753b9262459f85fc5cff26a07e44, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/17769644803f4f2bb8b6c00edd862b7b]
2024-11-19T12:19:20,705 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 462934de600e44e998571a9a3f8a6434, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732018756158
2024-11-19T12:19:20,705 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57836298a7ee4019983eae641342b984, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732018756158
2024-11-19T12:19:20,705 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 57958f547f96421da277787ee7b8e9f4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732018756179
2024-11-19T12:19:20,705 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 206a753b9262459f85fc5cff26a07e44, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732018756179
2024-11-19T12:19:20,705 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting bc53b04783034c86a538b561ce48467f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732018758308
2024-11-19T12:19:20,705 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17769644803f4f2bb8b6c00edd862b7b, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732018758305
2024-11-19T12:19:20,712 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#B#compaction#321 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:19:20,713 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/0e1be06b7f0640728cf7d641fea6c795 is 50, key is test_row_0/B:col10/1732018758308/Put/seqid=0
2024-11-19T12:19:20,726 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58]
2024-11-19T12:19:20,728 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241119a05cd8e5d3674ce289bf299700173722_c569d7fdb1587d224050261fa2ec2f58 store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58]
2024-11-19T12:19:20,729 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455
2024-11-19T12:19:20,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109
2024-11-19T12:19:20,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.
2024-11-19T12:19:20,729 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241119a05cd8e5d3674ce289bf299700173722_c569d7fdb1587d224050261fa2ec2f58, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58]
2024-11-19T12:19:20,729 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB
2024-11-19T12:19:20,730 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119a05cd8e5d3674ce289bf299700173722_c569d7fdb1587d224050261fa2ec2f58 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58]
2024-11-19T12:19:20,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A
2024-11-19T12:19:20,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:19:20,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B
2024-11-19T12:19:20,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:19:20,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C
2024-11-19T12:19:20,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:19:20,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742198_1374 (size=12104)
2024-11-19T12:19:20,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742199_1375 (size=4469)
2024-11-19T12:19:20,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411197e5bd4ced32045aeba1577848b815d67_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018759459/Put/seqid=0
2024-11-19T12:19:20,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742200_1376 (size=12154)
2024-11-19T12:19:20,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-11-19T12:19:21,138 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/0e1be06b7f0640728cf7d641fea6c795 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/0e1be06b7f0640728cf7d641fea6c795
2024-11-19T12:19:21,139 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#A#compaction#322 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:19:21,140 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/ee68509521b145a69bfc789bf8a45f75 is 175, key is test_row_0/A:col10/1732018758308/Put/seqid=0
2024-11-19T12:19:21,143 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/B of c569d7fdb1587d224050261fa2ec2f58 into 0e1be06b7f0640728cf7d641fea6c795(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T12:19:21,143 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58:
2024-11-19T12:19:21,143 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/B, priority=13, startTime=1732018760703; duration=0sec
2024-11-19T12:19:21,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742201_1377 (size=31058)
2024-11-19T12:19:21,144 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-19T12:19:21,144 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:B
2024-11-19T12:19:21,145 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T12:19:21,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,153 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T12:19:21,153 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/C is initiating minor compaction (all files)
2024-11-19T12:19:21,153 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/C in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.
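The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines refer to the exploring policy's ratio test: a candidate set counts as "in ratio" when no single file is larger than the compaction ratio times the combined size of the other files in the set. The following is a simplified sketch of that check, not the actual HBase implementation; the 1.2 ratio is assumed as the common default, and the byte counts in the second call are made up for contrast.

```java
import java.util.List;

public class FilesInRatioSketch {
  /**
   * Returns true when every file in the candidate selection is no larger than
   * ratio * (sum of the other files' sizes), mirroring the "in ratio" notion
   * that ExploringCompactionPolicy logs. Sizes are HFile sizes in bytes.
   */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // The three B-family files selected above are ~11.7 K each, 36003 bytes total.
    System.out.println(filesInRatio(List.of(12001L, 12001L, 12001L), 1.2)); // true
    // A hypothetical selection with one oversized file would fail the test.
    System.out.println(filesInRatio(List.of(48128L, 2048L, 2048L), 1.2));   // false
  }
}
```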
2024-11-19T12:19:21,153 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/486f542f214c4dfe852de1f65bb14c1e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/baaeabbdbf724fcb8fe7c5e25f434f4b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/cff0ae6a20c746b6b0d77027fbaf4763] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=35.2 K
2024-11-19T12:19:21,154 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 486f542f214c4dfe852de1f65bb14c1e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732018756158
2024-11-19T12:19:21,154 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting baaeabbdbf724fcb8fe7c5e25f434f4b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732018756179
2024-11-19T12:19:21,155 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting cff0ae6a20c746b6b0d77027fbaf4763, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732018758308
2024-11-19T12:19:21,157 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411197e5bd4ced32045aeba1577848b815d67_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411197e5bd4ced32045aeba1577848b815d67_c569d7fdb1587d224050261fa2ec2f58
2024-11-19T12:19:21,158 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/ee68509521b145a69bfc789bf8a45f75 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/ee68509521b145a69bfc789bf8a45f75
2024-11-19T12:19:21,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/c49458a8c8a541fb8d81523edfff61b1, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58]
2024-11-19T12:19:21,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/c49458a8c8a541fb8d81523edfff61b1 is 175, key is test_row_0/A:col10/1732018759459/Put/seqid=0
2024-11-19T12:19:21,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742202_1378 (size=30955)
2024-11-19T12:19:21,166 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#C#compaction#324 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:19:21,166 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/c49458a8c8a541fb8d81523edfff61b1
2024-11-19T12:19:21,166 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/c8876b8e31974f658fee2a2098eb2262 is 50, key is test_row_0/C:col10/1732018758308/Put/seqid=0
2024-11-19T12:19:21,167 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/A of c569d7fdb1587d224050261fa2ec2f58 into ee68509521b145a69bfc789bf8a45f75(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute.
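The "Committing .../.tmp/... as ..." and "FLUSH Renaming flushed file from ... to ..." entries reflect the write-to-temp-then-rename pattern: a new HFile is written out completely under a .tmp directory and only then moved into the live store directory, so readers never observe a half-written file. Below is a generic sketch of that pattern against the Hadoop FileSystem API; the paths and file contents are placeholders, not the directories from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Placeholder paths; an HBase region would use <region>/.tmp/<family>/<file>.
    Path tmpFile = new Path("/data/default/ExampleTable/region/.tmp/A/newfile");
    Path finalFile = new Path("/data/default/ExampleTable/region/A/newfile");

    // 1. Write the complete file under .tmp first.
    try (FSDataOutputStream out = fs.create(tmpFile)) {
      out.writeBytes("file contents would go here");
    }

    // 2. Move it into the live store directory only once it is complete.
    if (!fs.rename(tmpFile, finalFile)) {
      throw new java.io.IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
  }
}
```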
2024-11-19T12:19:21,167 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58:
2024-11-19T12:19:21,167 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/A, priority=13, startTime=1732018760703; duration=0sec
2024-11-19T12:19:21,167 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:19:21,167 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:A
2024-11-19T12:19:21,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742203_1379 (size=12104)
2024-11-19T12:19:21,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/c965fe9e7d364688aa232aaf1b33949e is 50, key is test_row_0/B:col10/1732018759459/Put/seqid=0
2024-11-19T12:19:21,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742204_1380 (size=12001)
2024-11-19T12:19:21,178 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/c965fe9e7d364688aa232aaf1b33949e
2024-11-19T12:19:21,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/5acd1606575847f68a1c09f19747a05e is 50, key is test_row_0/C:col10/1732018759459/Put/seqid=0
2024-11-19T12:19:21,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742205_1381 (size=12001)
2024-11-19T12:19:21,190 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/5acd1606575847f68a1c09f19747a05e
2024-11-19T12:19:21,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/c49458a8c8a541fb8d81523edfff61b1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/c49458a8c8a541fb8d81523edfff61b1
2024-11-19T12:19:21,205 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/c49458a8c8a541fb8d81523edfff61b1, entries=150, sequenceid=77, filesize=30.2 K
2024-11-19T12:19:21,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/c965fe9e7d364688aa232aaf1b33949e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c965fe9e7d364688aa232aaf1b33949e
2024-11-19T12:19:21,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,214 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c965fe9e7d364688aa232aaf1b33949e, entries=150, sequenceid=77, filesize=11.7 K
2024-11-19T12:19:21,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/5acd1606575847f68a1c09f19747a05e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/5acd1606575847f68a1c09f19747a05e
2024-11-19T12:19:21,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,218 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/5acd1606575847f68a1c09f19747a05e, entries=150, sequenceid=77, filesize=11.7 K
2024-11-19T12:19:21,219 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for c569d7fdb1587d224050261fa2ec2f58 in 490ms, sequenceid=77, compaction requested=false
2024-11-19T12:19:21,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58:
2024-11-19T12:19:21,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.
2024-11-19T12:19:21,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109
2024-11-19T12:19:21,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=109
2024-11-19T12:19:21,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108
2024-11-19T12:19:21,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 948 msec
2024-11-19T12:19:21,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 964 msec
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,252 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,257 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,262 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,265 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,268 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,271 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,274 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,277 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,280 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,283 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,287 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,290 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,294 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... storefiletracker.StoreFileTrackerFactory(122) DEBUG entry "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeated verbatim by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 36047 from 2024-11-19T12:19:21,294 through 2024-11-19T12:19:21,361 ...]
2024-11-19T12:19:21,362 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,366 DEBUG 
2024-11-19T12:19:21,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-11-19T12:19:21,367 INFO [Thread-1634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed
2024-11-19T12:19:21,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-19T12:19:21,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees
2024-11-19T12:19:21,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-11-19T12:19:21,370 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-19T12:19:21,371 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T12:19:21,371 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-11-19T12:19:21,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,523 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:21,523 DEBUG 
2024-11-19T12:19:21,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111
2024-11-19T12:19:21,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.
2024-11-19T12:19:21,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58:
2024-11-19T12:19:21,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.
2024-11-19T12:19:21,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111
2024-11-19T12:19:21,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=111
2024-11-19T12:19:21,526 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110
2024-11-19T12:19:21,526 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 154 msec
2024-11-19T12:19:21,528 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 159 msec
2024-11-19T12:19:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,579 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/c8876b8e31974f658fee2a2098eb2262 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c8876b8e31974f658fee2a2098eb2262
2024-11-19T12:19:21,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,584 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/C of c569d7fdb1587d224050261fa2ec2f58 into c8876b8e31974f658fee2a2098eb2262(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T12:19:21,584 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58:
2024-11-19T12:19:21,584 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/C, priority=13, startTime=1732018760703; duration=0sec
2024-11-19T12:19:21,584 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:19:21,584 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:C
2024-11-19T12:19:21,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-19T12:19:21,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-11-19T12:19:21,673 INFO [Thread-1634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed
2024-11-19T12:19:21,674 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-19T12:19:21,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees
2024-11-19T12:19:21,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112
2024-11-19T12:19:21,676 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-19T12:19:21,676 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T12:19:21,676 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T12:19:21,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:21,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:19:21,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:21,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:21,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:21,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119b9f45e4867a040cdb32bf7b642d279a1_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018761724/Put/seqid=0 2024-11-19T12:19:21,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742206_1382 (size=19474) 2024-11-19T12:19:21,774 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:21,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-19T12:19:21,783 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119b9f45e4867a040cdb32bf7b642d279a1_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119b9f45e4867a040cdb32bf7b642d279a1_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:21,784 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/780efa5f51484541b07ae5c5413880bc, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:21,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/780efa5f51484541b07ae5c5413880bc is 175, key is test_row_0/A:col10/1732018761724/Put/seqid=0 2024-11-19T12:19:21,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:21,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018821789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:21,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742207_1383 (size=56733) 2024-11-19T12:19:21,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:21,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018821790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:21,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:21,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018821791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:21,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:21,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018821795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:21,827 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:21,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-19T12:19:21,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:21,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:21,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:21,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
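[Editor's note, for orientation only: the entries above show client puts being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while MemStoreFlusher.0 flushes region c569d7fdb1587d224050261fa2ec2f58, and the master's FlushRegionCallable (pid=113) bailing out because the region is already flushing. Below is a minimal Java sketch of the client write path that is being throttled. The table, row and column names are taken from the log; the retry count, the backoff values, and the comment about how the 512 KB blocking limit is typically derived are assumptions added only for illustration and are not part of the test code that produced this log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackpressureSketch {
  public static void main(String[] args) throws Exception {
    // The "Over memstore limit=512.0 K" threshold seen above is typically the region server's
    // hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier
    // (server-side settings; the exact values used by this test are not visible in the log).
    Configuration conf = HBaseConfiguration.create();

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same row/family/qualifier as the flushed cell in the log: test_row_0 / A:col10.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      // While the region is flushing, puts can fail with RegionTooBusyException. The stock
      // HBase client already retries such failures internally; this explicit loop only makes
      // the backoff visible in the sketch.
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (IOException e) {
          if (attempt >= 3) {
            throw e;                       // give up after a few attempts in this sketch
          }
          Thread.sleep(100L * (attempt + 1));  // simple linear backoff before retrying
        }
      }
    }
  }
}
End of editor's note; the log resumes below.]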
2024-11-19T12:19:21,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:21,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:21,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:21,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018821896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:21,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:21,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018821900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:21,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:21,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018821901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:21,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:21,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018821907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:21,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-19T12:19:21,980 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:21,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-19T12:19:21,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:21,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:21,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:21,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:21,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:21,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:22,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018822100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018822107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018822108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018822111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,133 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:22,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-19T12:19:22,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:22,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:22,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:22,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:22,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:22,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:22,197 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/780efa5f51484541b07ae5c5413880bc 2024-11-19T12:19:22,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/182e14ff1628409b864db2d95586712d is 50, key is test_row_0/B:col10/1732018761724/Put/seqid=0 2024-11-19T12:19:22,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742208_1384 (size=12001) 2024-11-19T12:19:22,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/182e14ff1628409b864db2d95586712d 2024-11-19T12:19:22,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/d37e5bda609a4fcfa6eb3b0f4867da06 is 50, key is test_row_0/C:col10/1732018761724/Put/seqid=0 2024-11-19T12:19:22,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742209_1385 (size=12001) 2024-11-19T12:19:22,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/d37e5bda609a4fcfa6eb3b0f4867da06 2024-11-19T12:19:22,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/780efa5f51484541b07ae5c5413880bc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/780efa5f51484541b07ae5c5413880bc 2024-11-19T12:19:22,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/780efa5f51484541b07ae5c5413880bc, entries=300, sequenceid=92, filesize=55.4 K 2024-11-19T12:19:22,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/182e14ff1628409b864db2d95586712d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/182e14ff1628409b864db2d95586712d 2024-11-19T12:19:22,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/182e14ff1628409b864db2d95586712d, entries=150, sequenceid=92, filesize=11.7 K 2024-11-19T12:19:22,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/d37e5bda609a4fcfa6eb3b0f4867da06 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/d37e5bda609a4fcfa6eb3b0f4867da06 2024-11-19T12:19:22,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/d37e5bda609a4fcfa6eb3b0f4867da06, entries=150, sequenceid=92, filesize=11.7 K 2024-11-19T12:19:22,247 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c569d7fdb1587d224050261fa2ec2f58 in 520ms, sequenceid=92, compaction requested=true 2024-11-19T12:19:22,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:22,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:22,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:22,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:22,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:22,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:22,247 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:22,247 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:22,247 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:22,248 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:22,248 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118746 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:22,248 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/A is initiating minor compaction (all files) 2024-11-19T12:19:22,248 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/B is initiating minor compaction (all files) 2024-11-19T12:19:22,248 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/A in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:22,248 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/B in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:22,249 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/ee68509521b145a69bfc789bf8a45f75, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/c49458a8c8a541fb8d81523edfff61b1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/780efa5f51484541b07ae5c5413880bc] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=116.0 K 2024-11-19T12:19:22,249 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/0e1be06b7f0640728cf7d641fea6c795, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c965fe9e7d364688aa232aaf1b33949e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/182e14ff1628409b864db2d95586712d] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=35.3 K 2024-11-19T12:19:22,249 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:22,249 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/ee68509521b145a69bfc789bf8a45f75, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/c49458a8c8a541fb8d81523edfff61b1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/780efa5f51484541b07ae5c5413880bc] 2024-11-19T12:19:22,249 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee68509521b145a69bfc789bf8a45f75, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732018758308 2024-11-19T12:19:22,249 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e1be06b7f0640728cf7d641fea6c795, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732018758308 2024-11-19T12:19:22,249 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting c49458a8c8a541fb8d81523edfff61b1, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732018759458 2024-11-19T12:19:22,249 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c965fe9e7d364688aa232aaf1b33949e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732018759458 2024-11-19T12:19:22,250 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 780efa5f51484541b07ae5c5413880bc, keycount=300, bloomtype=ROW, size=55.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732018761655 2024-11-19T12:19:22,250 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 182e14ff1628409b864db2d95586712d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732018761723 2024-11-19T12:19:22,256 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:22,258 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#B#compaction#330 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:22,258 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/294e35f3e2164bd3982fa35779c1a7f9 is 50, key is test_row_0/B:col10/1732018761724/Put/seqid=0 2024-11-19T12:19:22,260 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411191b5c4d81e5c64193a64563343698cb5f_c569d7fdb1587d224050261fa2ec2f58 store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:22,262 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411191b5c4d81e5c64193a64563343698cb5f_c569d7fdb1587d224050261fa2ec2f58, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:22,262 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411191b5c4d81e5c64193a64563343698cb5f_c569d7fdb1587d224050261fa2ec2f58 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:22,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742210_1386 (size=12207) 2024-11-19T12:19:22,280 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/294e35f3e2164bd3982fa35779c1a7f9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/294e35f3e2164bd3982fa35779c1a7f9 2024-11-19T12:19:22,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-19T12:19:22,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742211_1387 (size=4469) 2024-11-19T12:19:22,285 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#A#compaction#331 average throughput is 0.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:22,286 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/32c37611fa814c9dbd3c852526a6529a is 175, key is test_row_0/A:col10/1732018761724/Put/seqid=0 2024-11-19T12:19:22,286 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:22,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-19T12:19:22,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:22,287 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-19T12:19:22,288 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/B of c569d7fdb1587d224050261fa2ec2f58 into 294e35f3e2164bd3982fa35779c1a7f9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:22,288 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:22,288 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/B, priority=13, startTime=1732018762247; duration=0sec 2024-11-19T12:19:22,288 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:22,288 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:B 2024-11-19T12:19:22,288 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:22,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:22,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:22,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:22,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:22,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:22,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:22,290 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:22,291 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/C is initiating minor compaction (all files) 2024-11-19T12:19:22,291 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/C in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:22,291 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c8876b8e31974f658fee2a2098eb2262, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/5acd1606575847f68a1c09f19747a05e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/d37e5bda609a4fcfa6eb3b0f4867da06] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=35.3 K 2024-11-19T12:19:22,300 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c8876b8e31974f658fee2a2098eb2262, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732018758308 2024-11-19T12:19:22,300 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 5acd1606575847f68a1c09f19747a05e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732018759458 2024-11-19T12:19:22,300 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d37e5bda609a4fcfa6eb3b0f4867da06, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732018761723 2024-11-19T12:19:22,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742212_1388 (size=31161) 2024-11-19T12:19:22,318 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#C#compaction#332 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:22,318 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/0fa77f0f2a51447ab5d9827f0de81ada is 50, key is test_row_0/C:col10/1732018761724/Put/seqid=0 2024-11-19T12:19:22,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411193ce68bab026b4b28b96ec52062629ffe_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018761779/Put/seqid=0 2024-11-19T12:19:22,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742214_1390 (size=12154) 2024-11-19T12:19:22,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:22,330 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411193ce68bab026b4b28b96ec52062629ffe_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193ce68bab026b4b28b96ec52062629ffe_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:22,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/b949f68c19f94a96ab75da1b367c0b5d, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:22,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/b949f68c19f94a96ab75da1b367c0b5d is 175, key is test_row_0/A:col10/1732018761779/Put/seqid=0 2024-11-19T12:19:22,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742213_1389 (size=12207) 2024-11-19T12:19:22,338 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/0fa77f0f2a51447ab5d9827f0de81ada as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/0fa77f0f2a51447ab5d9827f0de81ada 2024-11-19T12:19:22,343 
INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/C of c569d7fdb1587d224050261fa2ec2f58 into 0fa77f0f2a51447ab5d9827f0de81ada(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:22,343 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:22,343 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/C, priority=13, startTime=1732018762247; duration=0sec 2024-11-19T12:19:22,343 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:22,343 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:C 2024-11-19T12:19:22,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742215_1391 (size=30955) 2024-11-19T12:19:22,346 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/b949f68c19f94a96ab75da1b367c0b5d 2024-11-19T12:19:22,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/ff41c1b13bde41e2af0231ca8bfc26b8 is 50, key is test_row_0/B:col10/1732018761779/Put/seqid=0 2024-11-19T12:19:22,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742216_1392 (size=12001) 2024-11-19T12:19:22,368 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/ff41c1b13bde41e2af0231ca8bfc26b8 2024-11-19T12:19:22,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/c328a23dbb3646bfb750694db2545e20 is 50, key is test_row_0/C:col10/1732018761779/Put/seqid=0 2024-11-19T12:19:22,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742217_1393 (size=12001) 2024-11-19T12:19:22,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] 
regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:22,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:22,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018822423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018822430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018822430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018822431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018822531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018822535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018822535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018822539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,716 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/32c37611fa814c9dbd3c852526a6529a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/32c37611fa814c9dbd3c852526a6529a 2024-11-19T12:19:22,722 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/A of c569d7fdb1587d224050261fa2ec2f58 into 32c37611fa814c9dbd3c852526a6529a(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:22,722 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:22,722 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/A, priority=13, startTime=1732018762247; duration=0sec 2024-11-19T12:19:22,722 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:22,722 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:A 2024-11-19T12:19:22,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018822738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018822738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018822739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:22,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018822745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:22,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-19T12:19:22,788 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/c328a23dbb3646bfb750694db2545e20 2024-11-19T12:19:22,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/b949f68c19f94a96ab75da1b367c0b5d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/b949f68c19f94a96ab75da1b367c0b5d 2024-11-19T12:19:22,795 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/b949f68c19f94a96ab75da1b367c0b5d, entries=150, sequenceid=118, filesize=30.2 K 2024-11-19T12:19:22,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/ff41c1b13bde41e2af0231ca8bfc26b8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/ff41c1b13bde41e2af0231ca8bfc26b8 2024-11-19T12:19:22,799 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/ff41c1b13bde41e2af0231ca8bfc26b8, entries=150, sequenceid=118, filesize=11.7 K 2024-11-19T12:19:22,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/c328a23dbb3646bfb750694db2545e20 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c328a23dbb3646bfb750694db2545e20 2024-11-19T12:19:22,803 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c328a23dbb3646bfb750694db2545e20, entries=150, sequenceid=118, filesize=11.7 K 2024-11-19T12:19:22,803 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for c569d7fdb1587d224050261fa2ec2f58 in 516ms, sequenceid=118, compaction requested=false 2024-11-19T12:19:22,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:22,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:22,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-19T12:19:22,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-19T12:19:22,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-19T12:19:22,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1290 sec 2024-11-19T12:19:22,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.1330 sec 2024-11-19T12:19:23,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:23,047 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-19T12:19:23,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:23,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:23,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:23,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:23,047 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:23,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:23,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411195c6cc251ff374f8a9fd3043aa3ba8ef3_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018763045/Put/seqid=0 2024-11-19T12:19:23,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742218_1394 (size=12304) 2024-11-19T12:19:23,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018823072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018823073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018823076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018823077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018823177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018823178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018823182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018823182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018823383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018823383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018823384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018823387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,458 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:23,462 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411195c6cc251ff374f8a9fd3043aa3ba8ef3_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411195c6cc251ff374f8a9fd3043aa3ba8ef3_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:23,462 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/d680890f0a0c43e482c3bf2f6eaf1503, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:23,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/d680890f0a0c43e482c3bf2f6eaf1503 is 175, key is test_row_0/A:col10/1732018763045/Put/seqid=0 2024-11-19T12:19:23,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742219_1395 (size=31105) 2024-11-19T12:19:23,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018823686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018823686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018823688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:23,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018823694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:23,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-19T12:19:23,782 INFO [Thread-1634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-19T12:19:23,783 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:23,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-19T12:19:23,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-19T12:19:23,785 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:23,786 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:23,786 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:23,867 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/d680890f0a0c43e482c3bf2f6eaf1503 2024-11-19T12:19:23,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/fd0500b014074ba0bb5713022758a59d is 50, key is test_row_0/B:col10/1732018763045/Put/seqid=0 2024-11-19T12:19:23,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742220_1396 (size=12151) 
2024-11-19T12:19:23,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-19T12:19:23,936 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:23,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-19T12:19:23,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:23,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:23,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:23,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:23,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:23,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-19T12:19:24,088 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:24,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-19T12:19:24,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
2024-11-19T12:19:24,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:24,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:24,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
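The pid=115 records here are FlushRegionProcedure attempts dispatched by the master's FlushTableProcedure (pid=114), which was started by the client flush request logged above. A minimal client-side sketch of issuing that kind of flush, assuming a reachable cluster configuration on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the master to flush every region of the table; the master runs a
                // FlushTableProcedure and dispatches per-region flush procedures to region servers.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }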
2024-11-19T12:19:24,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:24,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018824195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:24,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:24,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018824195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:24,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:24,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018824196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:24,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:24,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018824202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:24,241 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:24,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-19T12:19:24,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:24,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:24,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:24,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
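The Over memstore limit warnings in this stretch reach writers as RegionTooBusyException, which the client's retrying caller keeps retrying (see the tries=7, retries=16 record a little further down). A hedged sketch of a writer against this table, with the client retry knobs involved; table, row, and family names are taken from the log, while the retry values and the fallback sleep are illustrative only:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriterSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Client-side retry knobs used by RpcRetryingCallerImpl ("retries=16" in the log).
            conf.setInt("hbase.client.retries.number", 16);
            conf.setLong("hbase.client.pause", 100L); // base pause between retries, in ms
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_2"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                try {
                    // The client retries RegionTooBusyException internally with backoff.
                    table.put(put);
                } catch (IOException e) {
                    // Retries exhausted (typically wrapping the RegionTooBusyException);
                    // back off and let the flush drain the memstore before trying again.
                    Thread.sleep(1_000L);
                }
            }
        }
    }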
2024-11-19T12:19:24,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/fd0500b014074ba0bb5713022758a59d 2024-11-19T12:19:24,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/131338719d4c4a2ca1a8ea7cf87c5e53 is 50, key is test_row_0/C:col10/1732018763045/Put/seqid=0 2024-11-19T12:19:24,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742221_1397 (size=12151) 2024-11-19T12:19:24,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:24,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45058 deadline: 1732018824375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:24,380 DEBUG [Thread-1628 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8199 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., hostname=af314c41f984,36047,1732018661455, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:19:24,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-19T12:19:24,393 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:24,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-19T12:19:24,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 
{event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:24,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:24,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:24,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:24,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,546 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:24,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-19T12:19:24,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:24,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
as already flushing 2024-11-19T12:19:24,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:24,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,688 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/131338719d4c4a2ca1a8ea7cf87c5e53 2024-11-19T12:19:24,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/d680890f0a0c43e482c3bf2f6eaf1503 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d680890f0a0c43e482c3bf2f6eaf1503 2024-11-19T12:19:24,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d680890f0a0c43e482c3bf2f6eaf1503, entries=150, sequenceid=135, filesize=30.4 K 2024-11-19T12:19:24,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/fd0500b014074ba0bb5713022758a59d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/fd0500b014074ba0bb5713022758a59d 2024-11-19T12:19:24,699 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:24,699 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-19T12:19:24,699 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/fd0500b014074ba0bb5713022758a59d, entries=150, sequenceid=135, filesize=11.9 K 2024-11-19T12:19:24,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:24,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:24,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:24,699 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:24,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/131338719d4c4a2ca1a8ea7cf87c5e53 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/131338719d4c4a2ca1a8ea7cf87c5e53 2024-11-19T12:19:24,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
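Each failed pid=115 attempt above ends with the region server reporting "Unable to complete flush" because a flush is already in progress, and the master simply dispatches the callable again a few hundred milliseconds later until the in-flight flush drains. Below is a deliberately simplified retry loop that mirrors that pattern; it is an illustration only, not HBase's RSProcedureDispatcher code.

    import java.util.concurrent.Callable;

    public class RetryUntilDoneSketch {
        // Keep invoking the remote operation until it succeeds, pausing between attempts.
        static void retryUntilDone(Callable<Boolean> remoteOp, long pauseMillis) throws Exception {
            while (true) {
                try {
                    if (remoteOp.call()) {
                        return; // accepted, e.g. no other flush was running
                    }
                } catch (Exception e) {
                    // Remote side refused (e.g. "Unable to complete flush"); fall through and retry.
                }
                Thread.sleep(pauseMillis);
            }
        }

        public static void main(String[] args) throws Exception {
            final int[] attempts = {0};
            // Simulate a region that stays busy for the first four attempts.
            retryUntilDone(() -> ++attempts[0] > 4, 200L);
            System.out.println("succeeded after " + attempts[0] + " attempts");
        }
    }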
2024-11-19T12:19:24,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/131338719d4c4a2ca1a8ea7cf87c5e53, entries=150, sequenceid=135, filesize=11.9 K 2024-11-19T12:19:24,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for c569d7fdb1587d224050261fa2ec2f58 in 1657ms, sequenceid=135, compaction requested=true 2024-11-19T12:19:24,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:24,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:24,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:24,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:24,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:24,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:24,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:24,704 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:24,704 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:24,705 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93221 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:24,705 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/A is initiating minor compaction (all files) 2024-11-19T12:19:24,706 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/A in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
2024-11-19T12:19:24,706 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/32c37611fa814c9dbd3c852526a6529a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/b949f68c19f94a96ab75da1b367c0b5d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d680890f0a0c43e482c3bf2f6eaf1503] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=91.0 K 2024-11-19T12:19:24,706 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:24,706 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/32c37611fa814c9dbd3c852526a6529a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/b949f68c19f94a96ab75da1b367c0b5d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d680890f0a0c43e482c3bf2f6eaf1503] 2024-11-19T12:19:24,706 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:24,706 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/B is initiating minor compaction (all files) 2024-11-19T12:19:24,706 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/B in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
2024-11-19T12:19:24,706 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32c37611fa814c9dbd3c852526a6529a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732018761723 2024-11-19T12:19:24,706 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/294e35f3e2164bd3982fa35779c1a7f9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/ff41c1b13bde41e2af0231ca8bfc26b8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/fd0500b014074ba0bb5713022758a59d] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=35.5 K 2024-11-19T12:19:24,707 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting b949f68c19f94a96ab75da1b367c0b5d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732018761779 2024-11-19T12:19:24,707 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 294e35f3e2164bd3982fa35779c1a7f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732018761723 2024-11-19T12:19:24,707 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d680890f0a0c43e482c3bf2f6eaf1503, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018762429 2024-11-19T12:19:24,707 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting ff41c1b13bde41e2af0231ca8bfc26b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732018761779 2024-11-19T12:19:24,707 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting fd0500b014074ba0bb5713022758a59d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018762429 2024-11-19T12:19:24,713 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:24,714 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#B#compaction#339 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:24,714 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/1f29eea1b3aa43b58fd35534b58826a2 is 50, key is test_row_0/B:col10/1732018763045/Put/seqid=0 2024-11-19T12:19:24,715 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411194958489b75134e13a637db18978d3cd7_c569d7fdb1587d224050261fa2ec2f58 store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:24,716 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411194958489b75134e13a637db18978d3cd7_c569d7fdb1587d224050261fa2ec2f58, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:24,717 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411194958489b75134e13a637db18978d3cd7_c569d7fdb1587d224050261fa2ec2f58 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:24,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742222_1398 (size=12459) 2024-11-19T12:19:24,729 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/1f29eea1b3aa43b58fd35534b58826a2 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/1f29eea1b3aa43b58fd35534b58826a2 2024-11-19T12:19:24,734 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/B of c569d7fdb1587d224050261fa2ec2f58 into 1f29eea1b3aa43b58fd35534b58826a2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:19:24,734 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:24,734 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/B, priority=13, startTime=1732018764704; duration=0sec 2024-11-19T12:19:24,734 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:24,734 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:B 2024-11-19T12:19:24,734 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:24,735 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:24,735 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/C is initiating minor compaction (all files) 2024-11-19T12:19:24,735 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/C in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:24,735 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/0fa77f0f2a51447ab5d9827f0de81ada, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c328a23dbb3646bfb750694db2545e20, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/131338719d4c4a2ca1a8ea7cf87c5e53] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=35.5 K 2024-11-19T12:19:24,736 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fa77f0f2a51447ab5d9827f0de81ada, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732018761723 2024-11-19T12:19:24,737 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c328a23dbb3646bfb750694db2545e20, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732018761779 2024-11-19T12:19:24,738 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 131338719d4c4a2ca1a8ea7cf87c5e53, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018762429 2024-11-19T12:19:24,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is 
added to blk_1073742223_1399 (size=4469) 2024-11-19T12:19:24,739 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#A#compaction#340 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:24,740 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/dccab51bd9714193b8f0fb61384db504 is 175, key is test_row_0/A:col10/1732018763045/Put/seqid=0 2024-11-19T12:19:24,745 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#C#compaction#341 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:24,745 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/f9462655abd44d44b452a8f54a2359ff is 50, key is test_row_0/C:col10/1732018763045/Put/seqid=0 2024-11-19T12:19:24,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742224_1400 (size=31413) 2024-11-19T12:19:24,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742225_1401 (size=12459) 2024-11-19T12:19:24,852 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:24,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-19T12:19:24,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
2024-11-19T12:19:24,852 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-19T12:19:24,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:24,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:24,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:24,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:24,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:24,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:24,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111975cfe01c0e2f4564beda77d9c0f7f15f_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018763072/Put/seqid=0 2024-11-19T12:19:24,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742226_1402 (size=12304) 2024-11-19T12:19:24,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-19T12:19:25,153 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/dccab51bd9714193b8f0fb61384db504 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/dccab51bd9714193b8f0fb61384db504 2024-11-19T12:19:25,159 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/A of c569d7fdb1587d224050261fa2ec2f58 into dccab51bd9714193b8f0fb61384db504(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:19:25,159 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:25,159 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/A, priority=13, startTime=1732018764704; duration=0sec 2024-11-19T12:19:25,159 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:25,159 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:A 2024-11-19T12:19:25,163 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/f9462655abd44d44b452a8f54a2359ff as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/f9462655abd44d44b452a8f54a2359ff 2024-11-19T12:19:25,167 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/C of c569d7fdb1587d224050261fa2ec2f58 into f9462655abd44d44b452a8f54a2359ff(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:25,167 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:25,167 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/C, priority=13, startTime=1732018764704; duration=0sec 2024-11-19T12:19:25,167 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:25,167 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:C 2024-11-19T12:19:25,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:25,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:25,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018825221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018825221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018825223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018825223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:25,268 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111975cfe01c0e2f4564beda77d9c0f7f15f_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111975cfe01c0e2f4564beda77d9c0f7f15f_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:25,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/52987bcb1fbd4706bd7f59fdfa4b2e60, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:25,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/52987bcb1fbd4706bd7f59fdfa4b2e60 is 175, key is test_row_0/A:col10/1732018763072/Put/seqid=0 2024-11-19T12:19:25,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742227_1403 (size=31105) 2024-11-19T12:19:25,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018825325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018825327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018825328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018825328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018825531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018825532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018825532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018825532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,675 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/52987bcb1fbd4706bd7f59fdfa4b2e60 2024-11-19T12:19:25,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/c6bdad8485bb4decb88d90af54067bd7 is 50, key is test_row_0/B:col10/1732018763072/Put/seqid=0 2024-11-19T12:19:25,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742228_1404 (size=12151) 2024-11-19T12:19:25,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018825837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018825838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018825839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:25,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018825839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-19T12:19:26,087 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/c6bdad8485bb4decb88d90af54067bd7 2024-11-19T12:19:26,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/a760ac3634474a54bead0be147609c61 is 50, key is test_row_0/C:col10/1732018763072/Put/seqid=0 2024-11-19T12:19:26,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742229_1405 (size=12151) 2024-11-19T12:19:26,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:26,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018826341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:26,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:26,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018826343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:26,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:26,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018826344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:26,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:26,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018826345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:26,499 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/a760ac3634474a54bead0be147609c61 2024-11-19T12:19:26,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/52987bcb1fbd4706bd7f59fdfa4b2e60 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/52987bcb1fbd4706bd7f59fdfa4b2e60 2024-11-19T12:19:26,506 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/52987bcb1fbd4706bd7f59fdfa4b2e60, entries=150, sequenceid=157, filesize=30.4 K 2024-11-19T12:19:26,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/c6bdad8485bb4decb88d90af54067bd7 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c6bdad8485bb4decb88d90af54067bd7 2024-11-19T12:19:26,510 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c6bdad8485bb4decb88d90af54067bd7, entries=150, sequenceid=157, filesize=11.9 K 2024-11-19T12:19:26,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/a760ac3634474a54bead0be147609c61 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/a760ac3634474a54bead0be147609c61 2024-11-19T12:19:26,514 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/a760ac3634474a54bead0be147609c61, entries=150, sequenceid=157, filesize=11.9 K 2024-11-19T12:19:26,514 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for c569d7fdb1587d224050261fa2ec2f58 in 1662ms, sequenceid=157, compaction requested=false 2024-11-19T12:19:26,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:26,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:26,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-19T12:19:26,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-19T12:19:26,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-19T12:19:26,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7290 sec 2024-11-19T12:19:26,518 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.7340 sec 2024-11-19T12:19:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:27,347 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-19T12:19:27,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:27,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:27,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:27,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:27,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:27,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:27,355 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119571bb0c539d6476aa288e8d4a37dd23f_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018765220/Put/seqid=0 2024-11-19T12:19:27,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742230_1406 (size=14794) 2024-11-19T12:19:27,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018827377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018827378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018827378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018827379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018827484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018827487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018827487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018827488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018827688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018827693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018827693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018827693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,760 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:27,768 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119571bb0c539d6476aa288e8d4a37dd23f_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119571bb0c539d6476aa288e8d4a37dd23f_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:27,774 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/cec392521550466187191c6be58742ad, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:27,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/cec392521550466187191c6be58742ad is 175, key is test_row_0/A:col10/1732018765220/Put/seqid=0 2024-11-19T12:19:27,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742231_1407 (size=39749) 2024-11-19T12:19:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-19T12:19:27,889 INFO [Thread-1634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-19T12:19:27,890 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:27,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-19T12:19:27,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-19T12:19:27,892 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:27,893 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:27,893 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:27,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-19T12:19:27,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018827992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018827997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018827997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:27,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:27,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018827997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:28,044 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:28,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-19T12:19:28,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:28,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,045 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,182 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/cec392521550466187191c6be58742ad 2024-11-19T12:19:28,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/cabaa405b362471ebc75890983715557 is 50, key is test_row_0/B:col10/1732018765220/Put/seqid=0 2024-11-19T12:19:28,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742232_1408 (size=12151) 2024-11-19T12:19:28,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-19T12:19:28,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:28,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-19T12:19:28,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
as already flushing 2024-11-19T12:19:28,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,350 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:28,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-19T12:19:28,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:28,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-19T12:19:28,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:28,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018828499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:28,502 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:28,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-19T12:19:28,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:28,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
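The stretch above shows two things interleaving: the master keeps re-dispatching the remote flush procedure (pid=117) roughly every 150 ms, the region server answers that the region is already flushing, FlushRegionCallable surfaces that as "IOException: Unable to complete flush", and the master logs "Remote procedure failed" and tries again; meanwhile client Mutate calls are rejected with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit and the flush has not yet caught up. The stock HBase client normally retries RegionTooBusyException on its own, so the Java sketch below is only a minimal standalone illustration of that back-off-and-retry pattern, assuming the exception reaches the caller directly; the table name matches the test, but the column family, row, retry count and pause values are picked for the example.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  // Illustrative choices: family "A" and row "test_row_0" mirror the log, but any would do.
  private static final TableName TABLE = TableName.valueOf("TestAcidGuarantees");
  private static final byte[] FAMILY = Bytes.toBytes("A");

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TABLE)) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(FAMILY, Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5, 200L);
    }
  }

  // Retry a put when the region reports it is over its memstore blocking limit,
  // doubling the pause each attempt to give the flush time to free memstore space.
  static void putWithBackoff(Table table, Put put, int maxAttempts, long pauseMs)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;              // give up after maxAttempts tries
        }
        Thread.sleep(pauseMs);  // back off before retrying the same mutation
        pauseMs *= 2;
      }
    }
  }
}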
2024-11-19T12:19:28,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:28,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018828501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:28,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:28,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018828501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:28,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:28,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018828504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:28,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/cabaa405b362471ebc75890983715557 2024-11-19T12:19:28,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/b15af7a68dba4b558f080fb54d24d2f6 is 50, key is test_row_0/C:col10/1732018765220/Put/seqid=0 2024-11-19T12:19:28,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742233_1409 (size=12151) 2024-11-19T12:19:28,655 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:28,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-19T12:19:28,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:28,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,807 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:28,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-19T12:19:28,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:28,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,960 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:28,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-19T12:19:28,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:28,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:28,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:28,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-19T12:19:29,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/b15af7a68dba4b558f080fb54d24d2f6 2024-11-19T12:19:29,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/cec392521550466187191c6be58742ad as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/cec392521550466187191c6be58742ad 2024-11-19T12:19:29,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/cec392521550466187191c6be58742ad, entries=200, sequenceid=175, filesize=38.8 K 2024-11-19T12:19:29,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/cabaa405b362471ebc75890983715557 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/cabaa405b362471ebc75890983715557 2024-11-19T12:19:29,017 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/cabaa405b362471ebc75890983715557, entries=150, sequenceid=175, filesize=11.9 K 2024-11-19T12:19:29,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/b15af7a68dba4b558f080fb54d24d2f6 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/b15af7a68dba4b558f080fb54d24d2f6 2024-11-19T12:19:29,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/b15af7a68dba4b558f080fb54d24d2f6, entries=150, sequenceid=175, filesize=11.9 K 2024-11-19T12:19:29,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for c569d7fdb1587d224050261fa2ec2f58 in 1676ms, sequenceid=175, compaction requested=true 2024-11-19T12:19:29,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:29,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:29,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:29,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:29,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:29,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:29,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-19T12:19:29,024 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:29,024 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:29,025 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:29,025 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:29,025 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/A is initiating minor compaction (all files) 2024-11-19T12:19:29,025 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/B is initiating minor compaction (all files) 2024-11-19T12:19:29,025 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/A in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:29,025 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/B in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:29,025 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/dccab51bd9714193b8f0fb61384db504, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/52987bcb1fbd4706bd7f59fdfa4b2e60, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/cec392521550466187191c6be58742ad] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=99.9 K 2024-11-19T12:19:29,025 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/1f29eea1b3aa43b58fd35534b58826a2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c6bdad8485bb4decb88d90af54067bd7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/cabaa405b362471ebc75890983715557] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=35.9 K 2024-11-19T12:19:29,025 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:29,025 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/dccab51bd9714193b8f0fb61384db504, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/52987bcb1fbd4706bd7f59fdfa4b2e60, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/cec392521550466187191c6be58742ad] 2024-11-19T12:19:29,025 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f29eea1b3aa43b58fd35534b58826a2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018762429 2024-11-19T12:19:29,026 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting dccab51bd9714193b8f0fb61384db504, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018762429 2024-11-19T12:19:29,026 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6bdad8485bb4decb88d90af54067bd7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732018763071 2024-11-19T12:19:29,026 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 52987bcb1fbd4706bd7f59fdfa4b2e60, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732018763071 2024-11-19T12:19:29,026 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting cabaa405b362471ebc75890983715557, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018765220 2024-11-19T12:19:29,026 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting cec392521550466187191c6be58742ad, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018765220 2024-11-19T12:19:29,033 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#B#compaction#348 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:29,033 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/c448baec468d49f5a6d2bc8232efc305 is 50, key is test_row_0/B:col10/1732018765220/Put/seqid=0 2024-11-19T12:19:29,034 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:29,036 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241119d5179688f6784d8aa1aeab70fbf5c912_c569d7fdb1587d224050261fa2ec2f58 store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:29,038 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241119d5179688f6784d8aa1aeab70fbf5c912_c569d7fdb1587d224050261fa2ec2f58, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:29,038 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119d5179688f6784d8aa1aeab70fbf5c912_c569d7fdb1587d224050261fa2ec2f58 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:29,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742234_1410 (size=12561) 2024-11-19T12:19:29,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742235_1411 (size=4469) 2024-11-19T12:19:29,113 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:29,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-19T12:19:29,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
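The compaction selection above (SortedCompactionPolicy and ExploringCompactionPolicy picking 3 eligible files, for example the three B-family files totalling 36761 bytes) is driven by a size-ratio test over windows of adjacent store files. The sketch below is a simplified, self-contained illustration of that idea rather than HBase's actual policy code: a window qualifies only if no file in it is larger than ratio times the combined size of the other files (1.2 is HBase's default ratio); the individual file sizes in main are invented so that they sum to the 36761 bytes reported in the log.

import java.util.ArrayList;
import java.util.List;

// Simplified "exploring" selection: scan windows of adjacent store files and keep a
// window only if every file in it is at most `ratio` times the size of the others
// combined. The real ExploringCompactionPolicy adds size limits, scoring and more.
public class RatioSelectionSketch {

  static List<Long> selectFiles(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
    List<Long> best = new ArrayList<>();
    for (int start = 0; start < fileSizes.size(); start++) {
      for (int end = start + minFiles; end <= Math.min(fileSizes.size(), start + maxFiles); end++) {
        List<Long> window = fileSizes.subList(start, end);
        if (withinRatio(window, ratio) && window.size() > best.size()) {
          best = new ArrayList<>(window);   // prefer the largest qualifying window
        }
      }
    }
    return best;
  }

  static boolean withinRatio(List<Long> window, double ratio) {
    long total = window.stream().mapToLong(Long::longValue).sum();
    for (long size : window) {
      if (size > (total - size) * ratio) {
        return false;                       // one file dominates the window; skip it
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Illustrative split of the 36761-byte B-family selection from the log.
    List<Long> sizes = List.of(12_491L, 12_119L, 12_151L);
    System.out.println("selected: " + selectFiles(sizes, 3, 10, 1.2));
  }
}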
2024-11-19T12:19:29,114 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-19T12:19:29,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:29,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:29,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:29,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:29,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:29,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:29,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111956f07678c03e4668949701459874e5d3_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018767360/Put/seqid=0 2024-11-19T12:19:29,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742236_1412 (size=12304) 2024-11-19T12:19:29,444 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/c448baec468d49f5a6d2bc8232efc305 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c448baec468d49f5a6d2bc8232efc305 2024-11-19T12:19:29,449 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/B of c569d7fdb1587d224050261fa2ec2f58 into c448baec468d49f5a6d2bc8232efc305(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
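The memstore flush and the B-store compaction above both finish with the same commit step: the new HFile is written under the region's .tmp directory and then moved into the store directory (".tmp/B/c448baec... as .../B/c448baec..."), so readers only ever observe complete files. The sketch below shows that write-to-temp-then-rename pattern with the plain Hadoop FileSystem API; the paths and file contents are illustrative, and it leaves out the validation and store-file-tracker bookkeeping the real HRegionFileSystem commit performs.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Write-to-temp-then-rename: the pattern behind "Committing .../.tmp/B/<file> as .../B/<file>".
public class TmpThenCommit {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();   // picks up fs.defaultFS (HDFS or local FS)
    FileSystem fs = FileSystem.get(conf);

    Path regionDir = new Path("/data/default/TestAcidGuarantees/region-example");
    Path tmpFile   = new Path(regionDir, ".tmp/B/newfile");
    Path storeFile = new Path(regionDir, "B/newfile");

    // 1. Write the complete file under .tmp; readers never look there.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write("hfile bytes would go here".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Move it into the store directory; only now does it become visible to readers.
    fs.mkdirs(storeFile.getParent());
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
    }
  }
}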
2024-11-19T12:19:29,449 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:29,449 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/B, priority=13, startTime=1732018769024; duration=0sec 2024-11-19T12:19:29,449 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:29,449 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:B 2024-11-19T12:19:29,449 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:29,451 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:29,451 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/C is initiating minor compaction (all files) 2024-11-19T12:19:29,451 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/C in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:29,451 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/f9462655abd44d44b452a8f54a2359ff, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/a760ac3634474a54bead0be147609c61, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/b15af7a68dba4b558f080fb54d24d2f6] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=35.9 K 2024-11-19T12:19:29,451 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9462655abd44d44b452a8f54a2359ff, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732018762429 2024-11-19T12:19:29,451 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#A#compaction#349 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:29,451 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting a760ac3634474a54bead0be147609c61, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732018763071 2024-11-19T12:19:29,452 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting b15af7a68dba4b558f080fb54d24d2f6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018765220 2024-11-19T12:19:29,452 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/d04df25f9a3a45f887c0f6481b160d14 is 175, key is test_row_0/A:col10/1732018765220/Put/seqid=0 2024-11-19T12:19:29,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742237_1413 (size=31515) 2024-11-19T12:19:29,465 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#C#compaction#351 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:29,465 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/2eb6bcdb7e614b0589bc13f5692a712e is 50, key is test_row_0/C:col10/1732018765220/Put/seqid=0 2024-11-19T12:19:29,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742238_1414 (size=12561) 2024-11-19T12:19:29,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:29,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
as already flushing 2024-11-19T12:19:29,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:29,537 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111956f07678c03e4668949701459874e5d3_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111956f07678c03e4668949701459874e5d3_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:29,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/1683c1761a7547e3adef14581ec575cc, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:29,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/1683c1761a7547e3adef14581ec575cc is 175, key is test_row_0/A:col10/1732018767360/Put/seqid=0 2024-11-19T12:19:29,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018829535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018829536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742239_1415 (size=31105) 2024-11-19T12:19:29,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018829540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018829541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018829642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018829643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018829647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018829647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018829848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018829849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018829849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:29,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018829850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:29,865 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/d04df25f9a3a45f887c0f6481b160d14 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d04df25f9a3a45f887c0f6481b160d14 2024-11-19T12:19:29,869 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/A of c569d7fdb1587d224050261fa2ec2f58 into d04df25f9a3a45f887c0f6481b160d14(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:29,870 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:29,870 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/A, priority=13, startTime=1732018769023; duration=0sec 2024-11-19T12:19:29,870 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:29,870 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:A 2024-11-19T12:19:29,881 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/2eb6bcdb7e614b0589bc13f5692a712e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/2eb6bcdb7e614b0589bc13f5692a712e 2024-11-19T12:19:29,885 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/C of c569d7fdb1587d224050261fa2ec2f58 into 2eb6bcdb7e614b0589bc13f5692a712e(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:29,885 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:29,885 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/C, priority=13, startTime=1732018769024; duration=0sec 2024-11-19T12:19:29,886 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:29,886 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:C 2024-11-19T12:19:29,943 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=195, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/1683c1761a7547e3adef14581ec575cc 2024-11-19T12:19:29,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/50486294c0bc42dca329308acc9182c0 is 50, key is test_row_0/B:col10/1732018767360/Put/seqid=0 2024-11-19T12:19:29,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742240_1416 (size=12151) 2024-11-19T12:19:29,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-19T12:19:30,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:30,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018830152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:30,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:30,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018830152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:30,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:30,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018830152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:30,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:30,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018830156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:30,353 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/50486294c0bc42dca329308acc9182c0 2024-11-19T12:19:30,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/ab9f98257f234ecea8fde4f92fefba18 is 50, key is test_row_0/C:col10/1732018767360/Put/seqid=0 2024-11-19T12:19:30,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742241_1417 (size=12151) 2024-11-19T12:19:30,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:30,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018830657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:30,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:30,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018830657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:30,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:30,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018830658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:30,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:30,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018830661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:30,764 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/ab9f98257f234ecea8fde4f92fefba18 2024-11-19T12:19:30,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/1683c1761a7547e3adef14581ec575cc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/1683c1761a7547e3adef14581ec575cc 2024-11-19T12:19:30,772 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/1683c1761a7547e3adef14581ec575cc, entries=150, sequenceid=195, filesize=30.4 K 2024-11-19T12:19:30,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/50486294c0bc42dca329308acc9182c0 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/50486294c0bc42dca329308acc9182c0 2024-11-19T12:19:30,778 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/50486294c0bc42dca329308acc9182c0, entries=150, sequenceid=195, filesize=11.9 K 2024-11-19T12:19:30,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/ab9f98257f234ecea8fde4f92fefba18 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/ab9f98257f234ecea8fde4f92fefba18 2024-11-19T12:19:30,782 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/ab9f98257f234ecea8fde4f92fefba18, entries=150, sequenceid=195, filesize=11.9 K 2024-11-19T12:19:30,783 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for c569d7fdb1587d224050261fa2ec2f58 in 1668ms, sequenceid=195, compaction requested=false 2024-11-19T12:19:30,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:30,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:30,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-19T12:19:30,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-19T12:19:30,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-19T12:19:30,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8910 sec 2024-11-19T12:19:30,786 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 2.8950 sec 2024-11-19T12:19:31,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:31,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-19T12:19:31,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:31,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:31,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:31,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:31,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:31,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:31,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411191fa43808cd514795b9be0336bc7d2a7f_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018769524/Put/seqid=0 2024-11-19T12:19:31,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742242_1418 (size=14794) 2024-11-19T12:19:31,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:31,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018831691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:31,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:31,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018831695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:31,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:31,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018831695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:31,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:31,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018831696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:31,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:31,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018831798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:31,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018831801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:31,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018831802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:31,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:31,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018831802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:31,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-19T12:19:31,999 INFO [Thread-1634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-19T12:19:32,000 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:32,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-19T12:19:32,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-19T12:19:32,002 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:32,002 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:32,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:32,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018832006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018832006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018832007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018832007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,081 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:32,085 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411191fa43808cd514795b9be0336bc7d2a7f_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411191fa43808cd514795b9be0336bc7d2a7f_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:32,086 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/84151259f805424492f1a55e180eeb8d, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:32,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/84151259f805424492f1a55e180eeb8d is 175, key is test_row_0/A:col10/1732018769524/Put/seqid=0 2024-11-19T12:19:32,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742243_1419 (size=39749) 2024-11-19T12:19:32,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-19T12:19:32,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:32,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-19T12:19:32,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
2024-11-19T12:19:32,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:32,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:32,154 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:32,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-19T12:19:32,306 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:32,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-19T12:19:32,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
2024-11-19T12:19:32,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:32,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:32,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:32,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018832310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018832311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018832311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018832311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,459 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:32,459 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-19T12:19:32,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:32,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:32,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:32,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:32,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,491 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/84151259f805424492f1a55e180eeb8d 2024-11-19T12:19:32,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/6ef5eeeda71445a3b627d752fe07be9e is 50, key is test_row_0/B:col10/1732018769524/Put/seqid=0 2024-11-19T12:19:32,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742244_1420 (size=12151) 2024-11-19T12:19:32,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-19T12:19:32,611 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:32,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-19T12:19:32,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:32,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:32,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:32,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,764 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:32,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-19T12:19:32,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:32,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:32,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:32,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018832816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018832816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018832817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:32,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018832819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:32,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/6ef5eeeda71445a3b627d752fe07be9e 2024-11-19T12:19:32,908 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/db4d7141c88a465da42bfb661eb871db is 50, key is test_row_0/C:col10/1732018769524/Put/seqid=0 2024-11-19T12:19:32,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742245_1421 (size=12151) 2024-11-19T12:19:32,916 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:32,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-19T12:19:32,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:32,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:32,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:32,917 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:32,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:33,069 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:33,069 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-19T12:19:33,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:33,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:33,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:33,070 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:33,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:33,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:33,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-19T12:19:33,222 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:33,222 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-19T12:19:33,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:33,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:33,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:33,222 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:33,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:33,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:33,312 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/db4d7141c88a465da42bfb661eb871db 2024-11-19T12:19:33,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/84151259f805424492f1a55e180eeb8d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/84151259f805424492f1a55e180eeb8d 2024-11-19T12:19:33,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/84151259f805424492f1a55e180eeb8d, entries=200, sequenceid=215, filesize=38.8 K 2024-11-19T12:19:33,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/6ef5eeeda71445a3b627d752fe07be9e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/6ef5eeeda71445a3b627d752fe07be9e 2024-11-19T12:19:33,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/6ef5eeeda71445a3b627d752fe07be9e, entries=150, 
sequenceid=215, filesize=11.9 K 2024-11-19T12:19:33,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/db4d7141c88a465da42bfb661eb871db as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/db4d7141c88a465da42bfb661eb871db 2024-11-19T12:19:33,328 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/db4d7141c88a465da42bfb661eb871db, entries=150, sequenceid=215, filesize=11.9 K 2024-11-19T12:19:33,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for c569d7fdb1587d224050261fa2ec2f58 in 1659ms, sequenceid=215, compaction requested=true 2024-11-19T12:19:33,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:33,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:33,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:33,329 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:33,329 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:33,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:33,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:33,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:33,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:33,330 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:33,330 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:33,330 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] 
regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/A is initiating minor compaction (all files) 2024-11-19T12:19:33,330 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/B is initiating minor compaction (all files) 2024-11-19T12:19:33,330 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/A in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:33,330 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/B in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:33,330 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c448baec468d49f5a6d2bc8232efc305, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/50486294c0bc42dca329308acc9182c0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/6ef5eeeda71445a3b627d752fe07be9e] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=36.0 K 2024-11-19T12:19:33,330 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d04df25f9a3a45f887c0f6481b160d14, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/1683c1761a7547e3adef14581ec575cc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/84151259f805424492f1a55e180eeb8d] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=100.0 K 2024-11-19T12:19:33,330 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:33,331 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d04df25f9a3a45f887c0f6481b160d14, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/1683c1761a7547e3adef14581ec575cc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/84151259f805424492f1a55e180eeb8d] 2024-11-19T12:19:33,331 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c448baec468d49f5a6d2bc8232efc305, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018765220 2024-11-19T12:19:33,331 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d04df25f9a3a45f887c0f6481b160d14, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018765220 2024-11-19T12:19:33,331 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 50486294c0bc42dca329308acc9182c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732018767360 2024-11-19T12:19:33,331 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1683c1761a7547e3adef14581ec575cc, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732018767360 2024-11-19T12:19:33,331 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ef5eeeda71445a3b627d752fe07be9e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732018769524 2024-11-19T12:19:33,331 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84151259f805424492f1a55e180eeb8d, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732018769524 2024-11-19T12:19:33,337 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:33,338 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#B#compaction#357 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:33,338 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/7f1c5d062f6b4152ac8027279f092498 is 50, key is test_row_0/B:col10/1732018769524/Put/seqid=0 2024-11-19T12:19:33,340 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241119dcae5db2f13f4303ba1261a0c71db053_c569d7fdb1587d224050261fa2ec2f58 store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:33,341 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241119dcae5db2f13f4303ba1261a0c71db053_c569d7fdb1587d224050261fa2ec2f58, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:33,341 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119dcae5db2f13f4303ba1261a0c71db053_c569d7fdb1587d224050261fa2ec2f58 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:33,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742246_1422 (size=12663) 2024-11-19T12:19:33,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742247_1423 (size=4469) 2024-11-19T12:19:33,370 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#A#compaction#358 average throughput is 0.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:33,371 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/891671e651f446f7b7ecf57ce9b4489e is 175, key is test_row_0/A:col10/1732018769524/Put/seqid=0 2024-11-19T12:19:33,374 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:33,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742248_1424 (size=31617) 2024-11-19T12:19:33,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-19T12:19:33,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
2024-11-19T12:19:33,375 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-19T12:19:33,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:33,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:33,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:33,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:33,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:33,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:33,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411194c7282b58754462bb58c3b2369ce6512_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018771695/Put/seqid=0 2024-11-19T12:19:33,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742249_1425 (size=12304) 2024-11-19T12:19:33,763 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/7f1c5d062f6b4152ac8027279f092498 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/7f1c5d062f6b4152ac8027279f092498 2024-11-19T12:19:33,767 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/B of c569d7fdb1587d224050261fa2ec2f58 into 7f1c5d062f6b4152ac8027279f092498(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:19:33,767 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:33,767 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/B, priority=13, startTime=1732018773329; duration=0sec 2024-11-19T12:19:33,768 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:33,768 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:B 2024-11-19T12:19:33,768 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:33,768 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:33,769 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/C is initiating minor compaction (all files) 2024-11-19T12:19:33,769 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/C in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:33,769 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/2eb6bcdb7e614b0589bc13f5692a712e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/ab9f98257f234ecea8fde4f92fefba18, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/db4d7141c88a465da42bfb661eb871db] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=36.0 K 2024-11-19T12:19:33,769 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 2eb6bcdb7e614b0589bc13f5692a712e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732018765220 2024-11-19T12:19:33,769 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting ab9f98257f234ecea8fde4f92fefba18, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732018767360 2024-11-19T12:19:33,770 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting db4d7141c88a465da42bfb661eb871db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732018769524 2024-11-19T12:19:33,774 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
c569d7fdb1587d224050261fa2ec2f58#C#compaction#360 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:33,775 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/e5a741b7f93d48dfa0c8d52958868376 is 50, key is test_row_0/C:col10/1732018769524/Put/seqid=0 2024-11-19T12:19:33,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742250_1426 (size=12663) 2024-11-19T12:19:33,779 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/891671e651f446f7b7ecf57ce9b4489e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/891671e651f446f7b7ecf57ce9b4489e 2024-11-19T12:19:33,783 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/e5a741b7f93d48dfa0c8d52958868376 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/e5a741b7f93d48dfa0c8d52958868376 2024-11-19T12:19:33,783 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/A of c569d7fdb1587d224050261fa2ec2f58 into 891671e651f446f7b7ecf57ce9b4489e(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:19:33,783 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:33,783 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/A, priority=13, startTime=1732018773329; duration=0sec 2024-11-19T12:19:33,784 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:33,784 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:A 2024-11-19T12:19:33,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:33,788 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/C of c569d7fdb1587d224050261fa2ec2f58 into e5a741b7f93d48dfa0c8d52958868376(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:33,788 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:33,788 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/C, priority=13, startTime=1732018773329; duration=0sec 2024-11-19T12:19:33,788 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:33,788 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:C 2024-11-19T12:19:33,790 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411194c7282b58754462bb58c3b2369ce6512_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411194c7282b58754462bb58c3b2369ce6512_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:33,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/0d9fb48fd0844d4eb2d73ddfdfd68e01, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:33,791 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/0d9fb48fd0844d4eb2d73ddfdfd68e01 is 175, key is test_row_0/A:col10/1732018771695/Put/seqid=0 2024-11-19T12:19:33,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742251_1427 (size=31105) 2024-11-19T12:19:33,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:33,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:33,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:33,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018833841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:33,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:33,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018833867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:33,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:33,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018833867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:33,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:33,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018833867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:33,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:33,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018833968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:33,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:33,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018833972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:33,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:33,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018833973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:33,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:33,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018833973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:34,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-19T12:19:34,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:34,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018834174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:34,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:34,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018834176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:34,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:34,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018834177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:34,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:34,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018834177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:34,207 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/0d9fb48fd0844d4eb2d73ddfdfd68e01 2024-11-19T12:19:34,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/bca210946bdf4edc893eb929c0fae69c is 50, key is test_row_0/B:col10/1732018771695/Put/seqid=0 2024-11-19T12:19:34,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742252_1428 (size=12151) 2024-11-19T12:19:34,218 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/bca210946bdf4edc893eb929c0fae69c 2024-11-19T12:19:34,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/1a9eca50b1734e9a9405ce7dc8d2d8dd is 50, key is 
test_row_0/C:col10/1732018771695/Put/seqid=0 2024-11-19T12:19:34,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742253_1429 (size=12151) 2024-11-19T12:19:34,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:34,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45058 deadline: 1732018834417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:34,423 DEBUG [Thread-1628 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18243 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., hostname=af314c41f984,36047,1732018661455, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:19:34,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:34,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018834478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:34,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:34,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018834481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:34,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:34,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018834482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:34,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:34,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018834482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:34,627 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/1a9eca50b1734e9a9405ce7dc8d2d8dd 2024-11-19T12:19:34,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/0d9fb48fd0844d4eb2d73ddfdfd68e01 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/0d9fb48fd0844d4eb2d73ddfdfd68e01 2024-11-19T12:19:34,635 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/0d9fb48fd0844d4eb2d73ddfdfd68e01, entries=150, sequenceid=234, filesize=30.4 K 2024-11-19T12:19:34,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/bca210946bdf4edc893eb929c0fae69c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bca210946bdf4edc893eb929c0fae69c 2024-11-19T12:19:34,639 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bca210946bdf4edc893eb929c0fae69c, entries=150, sequenceid=234, filesize=11.9 K 2024-11-19T12:19:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/1a9eca50b1734e9a9405ce7dc8d2d8dd as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/1a9eca50b1734e9a9405ce7dc8d2d8dd 2024-11-19T12:19:34,643 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/1a9eca50b1734e9a9405ce7dc8d2d8dd, entries=150, sequenceid=234, filesize=11.9 K 2024-11-19T12:19:34,645 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=107.34 KB/109920 for c569d7fdb1587d224050261fa2ec2f58 in 1270ms, sequenceid=234, compaction requested=false 2024-11-19T12:19:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
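The flush journal above, together with the repeated RegionTooBusyException warnings, shows client writers being pushed back while the memstore of c569d7fdb1587d224050261fa2ec2f58 sits above its 512.0 K blocking limit and the flush to sequenceid=234 drains it; the Thread-1628 entry shows the client retrying those puts (tries=8, retries=16). A minimal sketch of such a writer against the public HBase client API follows. The table, row, family and qualifier names are taken from the log; the retry settings are illustrative assumptions, not the test's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithRetries {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry bounds; illustrative values. The RpcRetryingCallerImpl
        // entry above reports retries=16 for the failing put.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100); // base backoff in ms

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // put() blocks in the caller; a RegionTooBusyException from the server is
          // retried with backoff, as seen in the log, until the operation times out
          // or the flush above frees memstore space and the write succeeds.
          table.put(put);
        }
      }
    }

The 512.0 K limit reported in the exceptions corresponds to the region's blocking memstore size, which by default is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; a limit this small suggests the test lowers the flush size far below the production default to provoke this back-pressure quickly.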
2024-11-19T12:19:34,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-19T12:19:34,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-19T12:19:34,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-19T12:19:34,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6450 sec 2024-11-19T12:19:34,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.6500 sec 2024-11-19T12:19:34,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:34,986 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-19T12:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:34,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411193fec18ef0aac4b3fa28a477bbda5653a_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018773840/Put/seqid=0 2024-11-19T12:19:34,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742254_1430 (size=14844) 2024-11-19T12:19:35,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018835003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018835006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018835007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018835008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018835112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018835114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018835114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018835114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018835315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018835317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018835320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018835320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,399 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:35,402 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411193fec18ef0aac4b3fa28a477bbda5653a_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193fec18ef0aac4b3fa28a477bbda5653a_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:35,403 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/3c8698719d5048178ec13c936aa63df7, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:35,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/3c8698719d5048178ec13c936aa63df7 is 175, key is test_row_0/A:col10/1732018773840/Put/seqid=0 2024-11-19T12:19:35,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742255_1431 (size=39799) 2024-11-19T12:19:35,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018835619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018835621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018835625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:35,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018835627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:35,808 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=257, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/3c8698719d5048178ec13c936aa63df7 2024-11-19T12:19:35,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/60f5d71a80f34e408d26d4c1d1385a53 is 50, key is test_row_0/B:col10/1732018773840/Put/seqid=0 2024-11-19T12:19:35,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742256_1432 (size=12201) 2024-11-19T12:19:35,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/60f5d71a80f34e408d26d4c1d1385a53 2024-11-19T12:19:35,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/acfec01ccc4d41efa7b85cabd0cd933b is 50, key is test_row_0/C:col10/1732018773840/Put/seqid=0 2024-11-19T12:19:35,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742257_1433 (size=12201) 2024-11-19T12:19:35,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/acfec01ccc4d41efa7b85cabd0cd933b 2024-11-19T12:19:35,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/3c8698719d5048178ec13c936aa63df7 as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/3c8698719d5048178ec13c936aa63df7 2024-11-19T12:19:35,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/3c8698719d5048178ec13c936aa63df7, entries=200, sequenceid=257, filesize=38.9 K 2024-11-19T12:19:35,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/60f5d71a80f34e408d26d4c1d1385a53 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/60f5d71a80f34e408d26d4c1d1385a53 2024-11-19T12:19:35,862 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/60f5d71a80f34e408d26d4c1d1385a53, entries=150, sequenceid=257, filesize=11.9 K 2024-11-19T12:19:35,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/acfec01ccc4d41efa7b85cabd0cd933b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/acfec01ccc4d41efa7b85cabd0cd933b 2024-11-19T12:19:35,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/acfec01ccc4d41efa7b85cabd0cd933b, entries=150, sequenceid=257, filesize=11.9 K 2024-11-19T12:19:35,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for c569d7fdb1587d224050261fa2ec2f58 in 881ms, sequenceid=257, compaction requested=true 2024-11-19T12:19:35,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:35,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:35,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:35,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:35,868 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:35,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:35,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c569d7fdb1587d224050261fa2ec2f58:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:35,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:35,868 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:35,869 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102521 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:35,869 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:35,869 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/A is initiating minor compaction (all files) 2024-11-19T12:19:35,869 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/B is initiating minor compaction (all files) 2024-11-19T12:19:35,869 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/B in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:35,869 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/A in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
2024-11-19T12:19:35,869 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/7f1c5d062f6b4152ac8027279f092498, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bca210946bdf4edc893eb929c0fae69c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/60f5d71a80f34e408d26d4c1d1385a53] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=36.1 K 2024-11-19T12:19:35,869 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/891671e651f446f7b7ecf57ce9b4489e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/0d9fb48fd0844d4eb2d73ddfdfd68e01, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/3c8698719d5048178ec13c936aa63df7] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=100.1 K 2024-11-19T12:19:35,869 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:35,869 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/891671e651f446f7b7ecf57ce9b4489e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/0d9fb48fd0844d4eb2d73ddfdfd68e01, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/3c8698719d5048178ec13c936aa63df7] 2024-11-19T12:19:35,870 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f1c5d062f6b4152ac8027279f092498, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732018769524 2024-11-19T12:19:35,870 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 891671e651f446f7b7ecf57ce9b4489e, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732018769524 2024-11-19T12:19:35,870 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting bca210946bdf4edc893eb929c0fae69c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732018771681 2024-11-19T12:19:35,870 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d9fb48fd0844d4eb2d73ddfdfd68e01, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732018771681 2024-11-19T12:19:35,870 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 60f5d71a80f34e408d26d4c1d1385a53, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1732018773840 2024-11-19T12:19:35,871 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c8698719d5048178ec13c936aa63df7, keycount=200, bloomtype=ROW, size=38.9 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1732018773838 2024-11-19T12:19:35,877 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:35,878 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#B#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:35,879 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/f668696ad6204243baa13c55093c4219 is 50, key is test_row_0/B:col10/1732018773840/Put/seqid=0 2024-11-19T12:19:35,881 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411196b6222c19ccd4b4fae8354b707150a3a_c569d7fdb1587d224050261fa2ec2f58 store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:35,882 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411196b6222c19ccd4b4fae8354b707150a3a_c569d7fdb1587d224050261fa2ec2f58, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:35,882 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196b6222c19ccd4b4fae8354b707150a3a_c569d7fdb1587d224050261fa2ec2f58 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:35,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742258_1434 (size=12815) 2024-11-19T12:19:35,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742259_1435 (size=4469) 2024-11-19T12:19:36,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-19T12:19:36,106 INFO [Thread-1634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-19T12:19:36,107 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:36,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-19T12:19:36,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-19T12:19:36,108 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:36,109 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:36,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:36,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:36,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-19T12:19:36,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:36,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:36,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:36,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:36,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:36,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:36,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196907f29308a647c0ab49d6263714aef8_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018775007/Put/seqid=0 2024-11-19T12:19:36,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742260_1436 (size=17534) 2024-11-19T12:19:36,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018836151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018836151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018836151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018836156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,188 DEBUG [Thread-1637 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x114e6211 to 127.0.0.1:64186 2024-11-19T12:19:36,188 DEBUG [Thread-1637 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:36,192 DEBUG [Thread-1635 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d930fb1 to 127.0.0.1:64186 2024-11-19T12:19:36,192 DEBUG [Thread-1635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:36,193 DEBUG [Thread-1641 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x133cc1f0 to 127.0.0.1:64186 2024-11-19T12:19:36,193 DEBUG [Thread-1641 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:36,194 DEBUG [Thread-1639 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x191ae36a to 127.0.0.1:64186 2024-11-19T12:19:36,194 DEBUG [Thread-1639 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:36,198 DEBUG [Thread-1643 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cfa4b91 to 127.0.0.1:64186 2024-11-19T12:19:36,198 DEBUG [Thread-1643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:36,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-19T12:19:36,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018836258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018836258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018836258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,260 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:36,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-19T12:19:36,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:36,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:36,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:36,261 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018836265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,297 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/f668696ad6204243baa13c55093c4219 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/f668696ad6204243baa13c55093c4219 2024-11-19T12:19:36,301 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/B of c569d7fdb1587d224050261fa2ec2f58 into f668696ad6204243baa13c55093c4219(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:36,301 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:36,301 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/B, priority=13, startTime=1732018775868; duration=0sec 2024-11-19T12:19:36,301 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:36,301 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:B 2024-11-19T12:19:36,302 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:36,302 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#A#compaction#367 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:36,302 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:36,302 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): c569d7fdb1587d224050261fa2ec2f58/C is initiating minor compaction (all files) 2024-11-19T12:19:36,302 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c569d7fdb1587d224050261fa2ec2f58/C in TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:36,302 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/e5a741b7f93d48dfa0c8d52958868376, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/1a9eca50b1734e9a9405ce7dc8d2d8dd, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/acfec01ccc4d41efa7b85cabd0cd933b] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp, totalSize=36.1 K 2024-11-19T12:19:36,303 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/fff29642577a477398cadd477ab40659 is 175, key is test_row_0/A:col10/1732018773840/Put/seqid=0 2024-11-19T12:19:36,303 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting e5a741b7f93d48dfa0c8d52958868376, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732018769524 2024-11-19T12:19:36,303 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a9eca50b1734e9a9405ce7dc8d2d8dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732018771681 2024-11-19T12:19:36,303 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting acfec01ccc4d41efa7b85cabd0cd933b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1732018773840 2024-11-19T12:19:36,309 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c569d7fdb1587d224050261fa2ec2f58#C#compaction#369 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:36,310 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/5f73734271264049a23c5b52a965d5b7 is 50, key is test_row_0/C:col10/1732018773840/Put/seqid=0 2024-11-19T12:19:36,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742261_1437 (size=31769) 2024-11-19T12:19:36,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742262_1438 (size=12815) 2024-11-19T12:19:36,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-19T12:19:36,413 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:36,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-19T12:19:36,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:36,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:36,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:36,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
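[Editor's note] The handlers above repeatedly reject Mutate calls with RegionTooBusyException once the region's memstore passes its 512.0 K blocking limit. As a minimal client-side sketch only: one way a writer might back off and retry such a rejection is shown below. The table, row, family, and column names reuse the ones in the log; the retry count and sleep are illustrative assumptions, not part of this test.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);   // accepted once the memstore drains below the blocking limit
          break;
        } catch (IOException e) {
          // The busy signal may reach the caller directly or only after the
          // client's own internal retries; this loop only handles the direct case.
          if (!(e instanceof RegionTooBusyException) || ++attempts > 5) {
            throw e;
          }
          Thread.sleep(200L * attempts);  // illustrative linear backoff
        }
      }
    }
  }
}
```

On the server side the usual levers are slowing the writers or raising hbase.hregion.memstore.flush.size / hbase.hregion.memstore.block.multiplier; the 512 K limit seen here looks like a deliberately small value chosen by the test to force this condition.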
2024-11-19T12:19:36,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018836459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018836459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018836459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018836467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,540 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:36,543 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196907f29308a647c0ab49d6263714aef8_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196907f29308a647c0ab49d6263714aef8_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:36,544 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/f9cef6899d9147ab9c19fe9dff3918d3, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:36,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/f9cef6899d9147ab9c19fe9dff3918d3 is 175, key is test_row_0/A:col10/1732018775007/Put/seqid=0 2024-11-19T12:19:36,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742263_1439 (size=48639) 2024-11-19T12:19:36,566 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
af314c41f984,36047,1732018661455 2024-11-19T12:19:36,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-19T12:19:36,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:36,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:36,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:36,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
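[Editor's note] The MemStoreFlusher entries a little earlier show HMobStore renaming a flushed mob file under mobdir for family A of this table, so that family is evidently MOB-enabled. As a hedged illustration only, a family with that behaviour could be declared as below; the 100 KB threshold is an assumption, not a value taken from this test's setup.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Illustrative MOB-enabled family; large cells are written to the mob area
      // and flushed through DefaultMobStoreFlusher, as in the log entries above.
      ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100 * 1024L)   // assumed ~100 KB cutoff, not from this test
          .build();
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(mobFamily)
          .build());
    }
  }
}
```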
2024-11-19T12:19:36,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
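[Editor's note] The master keeps re-dispatching FlushRegionCallable for pid=121 and the region server keeps answering that the region is already flushing. For reference, a minimal sketch of requesting such a flush through the public Admin API is below, assuming a standard client Configuration; only the table name comes from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask for the memstores of the table to be flushed; per the DEBUG lines
      // above, a region that is already flushing simply skips the request.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```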
2024-11-19T12:19:36,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-19T12:19:36,714 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/fff29642577a477398cadd477ab40659 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/fff29642577a477398cadd477ab40659 2024-11-19T12:19:36,718 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:36,718 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/A of c569d7fdb1587d224050261fa2ec2f58 into fff29642577a477398cadd477ab40659(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:36,718 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:36,718 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/A, priority=13, startTime=1732018775867; duration=0sec 2024-11-19T12:19:36,718 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:36,718 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:A 2024-11-19T12:19:36,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-19T12:19:36,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:36,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:36,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:36,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,720 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/5f73734271264049a23c5b52a965d5b7 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/5f73734271264049a23c5b52a965d5b7 2024-11-19T12:19:36,724 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c569d7fdb1587d224050261fa2ec2f58/C of c569d7fdb1587d224050261fa2ec2f58 into 5f73734271264049a23c5b52a965d5b7(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:36,724 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:36,724 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58., storeName=c569d7fdb1587d224050261fa2ec2f58/C, priority=13, startTime=1732018775868; duration=0sec 2024-11-19T12:19:36,724 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:36,724 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c569d7fdb1587d224050261fa2ec2f58:C 2024-11-19T12:19:36,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018836761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018836761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018836762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:36,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018836768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:36,871 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:36,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-19T12:19:36,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:36,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:36,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:36,871 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:36,948 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=275, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/f9cef6899d9147ab9c19fe9dff3918d3 2024-11-19T12:19:36,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/7233b390cac44abfb639e335c6c5a2c3 is 50, key is test_row_0/B:col10/1732018775007/Put/seqid=0 2024-11-19T12:19:36,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742264_1440 (size=12301) 2024-11-19T12:19:37,023 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:37,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-19T12:19:37,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:37,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:37,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:37,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,175 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:37,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-19T12:19:37,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:37,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:37,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:37,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-19T12:19:37,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:37,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1732018837265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:37,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:37,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45030 deadline: 1732018837266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:37,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:37,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45040 deadline: 1732018837267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:37,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:37,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45054 deadline: 1732018837269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:37,328 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:37,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-19T12:19:37,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:37,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:37,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:37,328 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
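The RegionTooBusyException warnings above show the region server rejecting Mutate calls while the memstore of region c569d7fdb1587d224050261fa2ec2f58 sits over the test's 512.0 K limit; the HBase client normally absorbs these by retrying with backoff. The sketch below is illustrative only: the row, family, and qualifier mirror keys that appear in this log, but the value and the retry settings are assumptions chosen for the example, and on retry exhaustion the exception may surface directly or wrapped in the cause chain depending on the client code path.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyPutSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry knobs; a RegionTooBusyException from the server is retried
    // until these are exhausted (values here are arbitrary example settings).
    conf.setInt("hbase.client.retries.number", 10);
    conf.setLong("hbase.client.pause", 100L);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("example-value"));
      table.put(put);
    } catch (IOException e) {
      // Retries exhausted while the region stayed over its memstore limit;
      // RegionTooBusyException typically appears here or in the cause chain.
      System.err.println("Write failed after retries: " + e);
    }
  }
}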
2024-11-19T12:19:37,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,357 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/7233b390cac44abfb639e335c6c5a2c3 2024-11-19T12:19:37,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/3ad06159926e404f97733c3948547b19 is 50, key is test_row_0/C:col10/1732018775007/Put/seqid=0 2024-11-19T12:19:37,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742265_1441 (size=12301) 2024-11-19T12:19:37,480 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:37,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-19T12:19:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:37,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,633 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:37,633 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-19T12:19:37,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:37,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:37,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:37,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:37,766 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/3ad06159926e404f97733c3948547b19 2024-11-19T12:19:37,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/f9cef6899d9147ab9c19fe9dff3918d3 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/f9cef6899d9147ab9c19fe9dff3918d3 2024-11-19T12:19:37,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/f9cef6899d9147ab9c19fe9dff3918d3, entries=250, sequenceid=275, filesize=47.5 K 2024-11-19T12:19:37,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/7233b390cac44abfb639e335c6c5a2c3 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/7233b390cac44abfb639e335c6c5a2c3 2024-11-19T12:19:37,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/7233b390cac44abfb639e335c6c5a2c3, entries=150, sequenceid=275, filesize=12.0 K 2024-11-19T12:19:37,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/3ad06159926e404f97733c3948547b19 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/3ad06159926e404f97733c3948547b19 2024-11-19T12:19:37,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/3ad06159926e404f97733c3948547b19, entries=150, sequenceid=275, filesize=12.0 K 2024-11-19T12:19:37,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for c569d7fdb1587d224050261fa2ec2f58 in 1652ms, sequenceid=275, compaction requested=false 2024-11-19T12:19:37,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:37,785 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:37,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-19T12:19:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:37,786 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-19T12:19:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:37,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119d44d77be0e5b41d5a588396e7e4f084c_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018776142/Put/seqid=0 2024-11-19T12:19:37,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742266_1442 (size=12454) 2024-11-19T12:19:38,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:38,197 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119d44d77be0e5b41d5a588396e7e4f084c_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d44d77be0e5b41d5a588396e7e4f084c_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:38,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/184a4cdcd5c348ccba362195f7387d8e, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:38,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/184a4cdcd5c348ccba362195f7387d8e is 175, key is test_row_0/A:col10/1732018776142/Put/seqid=0 2024-11-19T12:19:38,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742267_1443 (size=31255) 2024-11-19T12:19:38,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-19T12:19:38,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:38,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. as already flushing 2024-11-19T12:19:38,271 DEBUG [Thread-1624 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x081cac4f to 127.0.0.1:64186 2024-11-19T12:19:38,271 DEBUG [Thread-1624 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:38,273 DEBUG [Thread-1626 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64a04d7a to 127.0.0.1:64186 2024-11-19T12:19:38,273 DEBUG [Thread-1626 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:38,274 DEBUG [Thread-1632 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x54ed1e8a to 127.0.0.1:64186 2024-11-19T12:19:38,274 DEBUG [Thread-1632 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:38,277 DEBUG [Thread-1630 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d7912a0 to 127.0.0.1:64186 2024-11-19T12:19:38,277 DEBUG [Thread-1630 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:19:38,602 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=296, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/184a4cdcd5c348ccba362195f7387d8e 2024-11-19T12:19:38,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/3c910fb805de45b2baf60199b849ae9e is 50, key is test_row_0/B:col10/1732018776142/Put/seqid=0 2024-11-19T12:19:38,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742268_1444 (size=12301) 2024-11-19T12:19:39,011 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/3c910fb805de45b2baf60199b849ae9e 2024-11-19T12:19:39,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/b7eb1334cb71453a879c2949b416f59b is 50, key is test_row_0/C:col10/1732018776142/Put/seqid=0 2024-11-19T12:19:39,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742269_1445 (size=12301) 2024-11-19T12:19:39,420 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/b7eb1334cb71453a879c2949b416f59b 2024-11-19T12:19:39,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/184a4cdcd5c348ccba362195f7387d8e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/184a4cdcd5c348ccba362195f7387d8e 2024-11-19T12:19:39,425 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/184a4cdcd5c348ccba362195f7387d8e, entries=150, sequenceid=296, filesize=30.5 K 2024-11-19T12:19:39,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/3c910fb805de45b2baf60199b849ae9e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/3c910fb805de45b2baf60199b849ae9e 2024-11-19T12:19:39,428 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/3c910fb805de45b2baf60199b849ae9e, entries=150, sequenceid=296, filesize=12.0 K 2024-11-19T12:19:39,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/b7eb1334cb71453a879c2949b416f59b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/b7eb1334cb71453a879c2949b416f59b 2024-11-19T12:19:39,431 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/b7eb1334cb71453a879c2949b416f59b, entries=150, sequenceid=296, filesize=12.0 K 2024-11-19T12:19:39,431 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=26.84 KB/27480 for c569d7fdb1587d224050261fa2ec2f58 in 1645ms, sequenceid=296, compaction requested=true 2024-11-19T12:19:39,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for c569d7fdb1587d224050261fa2ec2f58: 2024-11-19T12:19:39,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:39,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-19T12:19:39,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-19T12:19:39,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-19T12:19:39,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3230 sec 2024-11-19T12:19:39,434 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 3.3270 sec 2024-11-19T12:19:39,928 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
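For context on the flush that pid=121 and its parent pid=120 finally complete above: an administrative flush of this table runs through the master's FlushTableProcedure, which dispatches FlushRegionCallable work to the region server hosting the region, exactly the classes seen throughout this log. A minimal sketch of issuing such a flush through the public Admin API, assuming a Configuration that already points at the running cluster (the test harness drives this internally rather than through a standalone client):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits the table flush; the master runs a FlushTableProcedure and the
      // client waits for the resulting procedure to complete, as in the
      // "Operation: FLUSH, Table Name: default:TestAcidGuarantees" entry below.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}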
2024-11-19T12:19:40,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-19T12:19:40,212 INFO [Thread-1634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-19T12:19:44,372 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/57836298a7ee4019983eae641342b984, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/206a753b9262459f85fc5cff26a07e44, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/17769644803f4f2bb8b6c00edd862b7b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/ee68509521b145a69bfc789bf8a45f75, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/c49458a8c8a541fb8d81523edfff61b1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/780efa5f51484541b07ae5c5413880bc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/32c37611fa814c9dbd3c852526a6529a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/b949f68c19f94a96ab75da1b367c0b5d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/dccab51bd9714193b8f0fb61384db504, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d680890f0a0c43e482c3bf2f6eaf1503, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/52987bcb1fbd4706bd7f59fdfa4b2e60, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/cec392521550466187191c6be58742ad, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d04df25f9a3a45f887c0f6481b160d14, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/1683c1761a7547e3adef14581ec575cc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/84151259f805424492f1a55e180eeb8d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/891671e651f446f7b7ecf57ce9b4489e, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/0d9fb48fd0844d4eb2d73ddfdfd68e01, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/3c8698719d5048178ec13c936aa63df7] to archive 2024-11-19T12:19:44,372 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T12:19:44,374 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/57836298a7ee4019983eae641342b984 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/57836298a7ee4019983eae641342b984 2024-11-19T12:19:44,374 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/206a753b9262459f85fc5cff26a07e44 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/206a753b9262459f85fc5cff26a07e44 2024-11-19T12:19:44,375 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/17769644803f4f2bb8b6c00edd862b7b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/17769644803f4f2bb8b6c00edd862b7b 2024-11-19T12:19:44,376 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/ee68509521b145a69bfc789bf8a45f75 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/ee68509521b145a69bfc789bf8a45f75 2024-11-19T12:19:44,377 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/c49458a8c8a541fb8d81523edfff61b1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/c49458a8c8a541fb8d81523edfff61b1 2024-11-19T12:19:44,378 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/780efa5f51484541b07ae5c5413880bc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/780efa5f51484541b07ae5c5413880bc 2024-11-19T12:19:44,378 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/32c37611fa814c9dbd3c852526a6529a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/32c37611fa814c9dbd3c852526a6529a 2024-11-19T12:19:44,379 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/b949f68c19f94a96ab75da1b367c0b5d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/b949f68c19f94a96ab75da1b367c0b5d 2024-11-19T12:19:44,380 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/dccab51bd9714193b8f0fb61384db504 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/dccab51bd9714193b8f0fb61384db504 2024-11-19T12:19:44,381 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d680890f0a0c43e482c3bf2f6eaf1503 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d680890f0a0c43e482c3bf2f6eaf1503 2024-11-19T12:19:44,382 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/52987bcb1fbd4706bd7f59fdfa4b2e60 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/52987bcb1fbd4706bd7f59fdfa4b2e60 2024-11-19T12:19:44,382 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/cec392521550466187191c6be58742ad to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/cec392521550466187191c6be58742ad 2024-11-19T12:19:44,383 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d04df25f9a3a45f887c0f6481b160d14 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/d04df25f9a3a45f887c0f6481b160d14 2024-11-19T12:19:44,384 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/1683c1761a7547e3adef14581ec575cc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/1683c1761a7547e3adef14581ec575cc 2024-11-19T12:19:44,385 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/84151259f805424492f1a55e180eeb8d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/84151259f805424492f1a55e180eeb8d 2024-11-19T12:19:44,386 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/891671e651f446f7b7ecf57ce9b4489e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/891671e651f446f7b7ecf57ce9b4489e 2024-11-19T12:19:44,386 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/0d9fb48fd0844d4eb2d73ddfdfd68e01 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/0d9fb48fd0844d4eb2d73ddfdfd68e01 2024-11-19T12:19:44,387 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/3c8698719d5048178ec13c936aa63df7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/3c8698719d5048178ec13c936aa63df7 2024-11-19T12:19:44,389 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/462934de600e44e998571a9a3f8a6434, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/57958f547f96421da277787ee7b8e9f4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/0e1be06b7f0640728cf7d641fea6c795, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bc53b04783034c86a538b561ce48467f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c965fe9e7d364688aa232aaf1b33949e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/294e35f3e2164bd3982fa35779c1a7f9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/182e14ff1628409b864db2d95586712d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/ff41c1b13bde41e2af0231ca8bfc26b8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/1f29eea1b3aa43b58fd35534b58826a2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/fd0500b014074ba0bb5713022758a59d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c6bdad8485bb4decb88d90af54067bd7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c448baec468d49f5a6d2bc8232efc305, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/cabaa405b362471ebc75890983715557, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/50486294c0bc42dca329308acc9182c0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/7f1c5d062f6b4152ac8027279f092498, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/6ef5eeeda71445a3b627d752fe07be9e, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bca210946bdf4edc893eb929c0fae69c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/60f5d71a80f34e408d26d4c1d1385a53] to archive 2024-11-19T12:19:44,390 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T12:19:44,391 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/462934de600e44e998571a9a3f8a6434 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/462934de600e44e998571a9a3f8a6434 2024-11-19T12:19:44,392 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/57958f547f96421da277787ee7b8e9f4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/57958f547f96421da277787ee7b8e9f4 2024-11-19T12:19:44,393 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/0e1be06b7f0640728cf7d641fea6c795 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/0e1be06b7f0640728cf7d641fea6c795 2024-11-19T12:19:44,393 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bc53b04783034c86a538b561ce48467f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bc53b04783034c86a538b561ce48467f 2024-11-19T12:19:44,394 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c965fe9e7d364688aa232aaf1b33949e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c965fe9e7d364688aa232aaf1b33949e 2024-11-19T12:19:44,395 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/294e35f3e2164bd3982fa35779c1a7f9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/294e35f3e2164bd3982fa35779c1a7f9 2024-11-19T12:19:44,396 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/182e14ff1628409b864db2d95586712d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/182e14ff1628409b864db2d95586712d 2024-11-19T12:19:44,397 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/ff41c1b13bde41e2af0231ca8bfc26b8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/ff41c1b13bde41e2af0231ca8bfc26b8 2024-11-19T12:19:44,398 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/1f29eea1b3aa43b58fd35534b58826a2 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/1f29eea1b3aa43b58fd35534b58826a2 2024-11-19T12:19:44,398 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/fd0500b014074ba0bb5713022758a59d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/fd0500b014074ba0bb5713022758a59d 2024-11-19T12:19:44,399 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c6bdad8485bb4decb88d90af54067bd7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c6bdad8485bb4decb88d90af54067bd7 2024-11-19T12:19:44,400 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c448baec468d49f5a6d2bc8232efc305 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/c448baec468d49f5a6d2bc8232efc305 2024-11-19T12:19:44,401 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/cabaa405b362471ebc75890983715557 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/cabaa405b362471ebc75890983715557 2024-11-19T12:19:44,402 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/50486294c0bc42dca329308acc9182c0 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/50486294c0bc42dca329308acc9182c0 2024-11-19T12:19:44,402 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/7f1c5d062f6b4152ac8027279f092498 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/7f1c5d062f6b4152ac8027279f092498 2024-11-19T12:19:44,403 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/6ef5eeeda71445a3b627d752fe07be9e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/6ef5eeeda71445a3b627d752fe07be9e 2024-11-19T12:19:44,404 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bca210946bdf4edc893eb929c0fae69c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/bca210946bdf4edc893eb929c0fae69c 2024-11-19T12:19:44,405 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/60f5d71a80f34e408d26d4c1d1385a53 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/60f5d71a80f34e408d26d4c1d1385a53 2024-11-19T12:19:44,407 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/486f542f214c4dfe852de1f65bb14c1e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/baaeabbdbf724fcb8fe7c5e25f434f4b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c8876b8e31974f658fee2a2098eb2262, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/cff0ae6a20c746b6b0d77027fbaf4763, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/5acd1606575847f68a1c09f19747a05e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/0fa77f0f2a51447ab5d9827f0de81ada, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/d37e5bda609a4fcfa6eb3b0f4867da06, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c328a23dbb3646bfb750694db2545e20, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/f9462655abd44d44b452a8f54a2359ff, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/131338719d4c4a2ca1a8ea7cf87c5e53, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/a760ac3634474a54bead0be147609c61, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/2eb6bcdb7e614b0589bc13f5692a712e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/b15af7a68dba4b558f080fb54d24d2f6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/ab9f98257f234ecea8fde4f92fefba18, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/e5a741b7f93d48dfa0c8d52958868376, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/db4d7141c88a465da42bfb661eb871db, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/1a9eca50b1734e9a9405ce7dc8d2d8dd, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/acfec01ccc4d41efa7b85cabd0cd933b] to archive 2024-11-19T12:19:44,407 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T12:19:44,408 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/486f542f214c4dfe852de1f65bb14c1e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/486f542f214c4dfe852de1f65bb14c1e 2024-11-19T12:19:44,409 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/baaeabbdbf724fcb8fe7c5e25f434f4b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/baaeabbdbf724fcb8fe7c5e25f434f4b 2024-11-19T12:19:44,410 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c8876b8e31974f658fee2a2098eb2262 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c8876b8e31974f658fee2a2098eb2262 2024-11-19T12:19:44,411 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/cff0ae6a20c746b6b0d77027fbaf4763 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/cff0ae6a20c746b6b0d77027fbaf4763 2024-11-19T12:19:44,411 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/5acd1606575847f68a1c09f19747a05e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/5acd1606575847f68a1c09f19747a05e 2024-11-19T12:19:44,412 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/0fa77f0f2a51447ab5d9827f0de81ada to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/0fa77f0f2a51447ab5d9827f0de81ada 2024-11-19T12:19:44,413 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/d37e5bda609a4fcfa6eb3b0f4867da06 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/d37e5bda609a4fcfa6eb3b0f4867da06 2024-11-19T12:19:44,413 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c328a23dbb3646bfb750694db2545e20 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c328a23dbb3646bfb750694db2545e20 2024-11-19T12:19:44,414 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/f9462655abd44d44b452a8f54a2359ff to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/f9462655abd44d44b452a8f54a2359ff 2024-11-19T12:19:44,415 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/131338719d4c4a2ca1a8ea7cf87c5e53 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/131338719d4c4a2ca1a8ea7cf87c5e53 2024-11-19T12:19:44,416 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/a760ac3634474a54bead0be147609c61 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/a760ac3634474a54bead0be147609c61 2024-11-19T12:19:44,417 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/2eb6bcdb7e614b0589bc13f5692a712e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/2eb6bcdb7e614b0589bc13f5692a712e 2024-11-19T12:19:44,418 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/b15af7a68dba4b558f080fb54d24d2f6 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/b15af7a68dba4b558f080fb54d24d2f6 2024-11-19T12:19:44,419 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/ab9f98257f234ecea8fde4f92fefba18 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/ab9f98257f234ecea8fde4f92fefba18 2024-11-19T12:19:44,420 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/e5a741b7f93d48dfa0c8d52958868376 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/e5a741b7f93d48dfa0c8d52958868376 2024-11-19T12:19:44,421 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/db4d7141c88a465da42bfb661eb871db to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/db4d7141c88a465da42bfb661eb871db 2024-11-19T12:19:44,422 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/1a9eca50b1734e9a9405ce7dc8d2d8dd to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/1a9eca50b1734e9a9405ce7dc8d2d8dd 2024-11-19T12:19:44,423 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/af314c41f984:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/acfec01ccc4d41efa7b85cabd0cd933b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/acfec01ccc4d41efa7b85cabd0cd933b
2024-11-19T12:19:44,454 DEBUG [Thread-1628 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3268230a to 127.0.0.1:64186
2024-11-19T12:19:44,454 DEBUG [Thread-1628 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2791
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8373 rows
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2780
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8340 rows
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2780
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8340 rows
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2800
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8400 rows
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2775
2024-11-19T12:19:44,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8325 rows
2024-11-19T12:19:44,454 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-19T12:19:44,454 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7267b857 to 127.0.0.1:64186
2024-11-19T12:19:44,454 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-19T12:19:44,458 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-19T12:19:44,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-19T12:19:44,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-19T12:19:44,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-11-19T12:19:44,462 DEBUG [PEWorker-5 {}]
hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018784462"}]},"ts":"1732018784462"} 2024-11-19T12:19:44,463 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-19T12:19:44,465 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-19T12:19:44,466 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-19T12:19:44,467 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c569d7fdb1587d224050261fa2ec2f58, UNASSIGN}] 2024-11-19T12:19:44,467 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c569d7fdb1587d224050261fa2ec2f58, UNASSIGN 2024-11-19T12:19:44,468 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=c569d7fdb1587d224050261fa2ec2f58, regionState=CLOSING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:19:44,469 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-19T12:19:44,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; CloseRegionProcedure c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:19:44,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-19T12:19:44,620 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:44,620 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(124): Close c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:44,621 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-19T12:19:44,621 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1681): Closing c569d7fdb1587d224050261fa2ec2f58, disabling compactions & flushes 2024-11-19T12:19:44,621 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:44,621 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:44,621 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 
after waiting 0 ms 2024-11-19T12:19:44,621 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58. 2024-11-19T12:19:44,621 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(2837): Flushing c569d7fdb1587d224050261fa2ec2f58 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-19T12:19:44,621 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=A 2024-11-19T12:19:44,621 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:44,621 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=B 2024-11-19T12:19:44,621 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:44,621 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c569d7fdb1587d224050261fa2ec2f58, store=C 2024-11-19T12:19:44,621 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:44,626 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196e3c73ee41e141678147eae9de3c3d6c_c569d7fdb1587d224050261fa2ec2f58 is 50, key is test_row_0/A:col10/1732018778272/Put/seqid=0 2024-11-19T12:19:44,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742270_1446 (size=12454) 2024-11-19T12:19:44,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-19T12:19:45,030 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:45,034 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196e3c73ee41e141678147eae9de3c3d6c_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196e3c73ee41e141678147eae9de3c3d6c_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:45,034 DEBUG 
[RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/07e2a343d1db4ad9864958bbf769939c, store: [table=TestAcidGuarantees family=A region=c569d7fdb1587d224050261fa2ec2f58] 2024-11-19T12:19:45,035 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/07e2a343d1db4ad9864958bbf769939c is 175, key is test_row_0/A:col10/1732018778272/Put/seqid=0 2024-11-19T12:19:45,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742271_1447 (size=31255) 2024-11-19T12:19:45,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-19T12:19:45,439 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=304, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/07e2a343d1db4ad9864958bbf769939c 2024-11-19T12:19:45,444 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/6e7e6f36030d4fe2a8c5fb791efb7119 is 50, key is test_row_0/B:col10/1732018778272/Put/seqid=0 2024-11-19T12:19:45,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742272_1448 (size=12301) 2024-11-19T12:19:45,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-19T12:19:45,848 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/6e7e6f36030d4fe2a8c5fb791efb7119 2024-11-19T12:19:45,853 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/c6898714bab84ecd9254a918532f76c5 is 50, key is test_row_0/C:col10/1732018778272/Put/seqid=0 2024-11-19T12:19:45,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742273_1449 (size=12301) 2024-11-19T12:19:46,257 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=11.18 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/c6898714bab84ecd9254a918532f76c5 2024-11-19T12:19:46,262 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/A/07e2a343d1db4ad9864958bbf769939c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/07e2a343d1db4ad9864958bbf769939c 2024-11-19T12:19:46,265 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/07e2a343d1db4ad9864958bbf769939c, entries=150, sequenceid=304, filesize=30.5 K 2024-11-19T12:19:46,265 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/B/6e7e6f36030d4fe2a8c5fb791efb7119 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/6e7e6f36030d4fe2a8c5fb791efb7119 2024-11-19T12:19:46,268 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/6e7e6f36030d4fe2a8c5fb791efb7119, entries=150, sequenceid=304, filesize=12.0 K 2024-11-19T12:19:46,269 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/.tmp/C/c6898714bab84ecd9254a918532f76c5 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c6898714bab84ecd9254a918532f76c5 2024-11-19T12:19:46,271 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c6898714bab84ecd9254a918532f76c5, entries=150, sequenceid=304, filesize=12.0 K 2024-11-19T12:19:46,272 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for c569d7fdb1587d224050261fa2ec2f58 in 1651ms, sequenceid=304, compaction requested=true 2024-11-19T12:19:46,276 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/recovered.edits/307.seqid, newMaxSeqId=307, maxSeqId=4
2024-11-19T12:19:46,276 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.
2024-11-19T12:19:46,276 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1635): Region close journal for c569d7fdb1587d224050261fa2ec2f58:
2024-11-19T12:19:46,278 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(170): Closed c569d7fdb1587d224050261fa2ec2f58
2024-11-19T12:19:46,278 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=c569d7fdb1587d224050261fa2ec2f58, regionState=CLOSED
2024-11-19T12:19:46,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124
2024-11-19T12:19:46,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseRegionProcedure c569d7fdb1587d224050261fa2ec2f58, server=af314c41f984,36047,1732018661455 in 1.8100 sec
2024-11-19T12:19:46,282 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123
2024-11-19T12:19:46,282 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c569d7fdb1587d224050261fa2ec2f58, UNASSIGN in 1.8130 sec
2024-11-19T12:19:46,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122
2024-11-19T12:19:46,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8160 sec
2024-11-19T12:19:46,284 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018786284"}]},"ts":"1732018786284"}
2024-11-19T12:19:46,285 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta
2024-11-19T12:19:46,286 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED
2024-11-19T12:19:46,288 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8280 sec
2024-11-19T12:19:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-11-19T12:19:46,565 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 122 completed
2024-11-19T12:19:46,565 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees
2024-11-19T12:19:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees
2024-11-19T12:19:46,567 DEBUG [PEWorker-2 {}]
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:46,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-19T12:19:46,567 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=126, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:46,568 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,570 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/recovered.edits] 2024-11-19T12:19:46,572 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/07e2a343d1db4ad9864958bbf769939c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/07e2a343d1db4ad9864958bbf769939c 2024-11-19T12:19:46,573 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/184a4cdcd5c348ccba362195f7387d8e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/184a4cdcd5c348ccba362195f7387d8e 2024-11-19T12:19:46,574 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/f9cef6899d9147ab9c19fe9dff3918d3 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/f9cef6899d9147ab9c19fe9dff3918d3 2024-11-19T12:19:46,574 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/fff29642577a477398cadd477ab40659 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/A/fff29642577a477398cadd477ab40659 2024-11-19T12:19:46,576 DEBUG [HFileArchiver-4 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/3c910fb805de45b2baf60199b849ae9e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/3c910fb805de45b2baf60199b849ae9e 2024-11-19T12:19:46,577 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/6e7e6f36030d4fe2a8c5fb791efb7119 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/6e7e6f36030d4fe2a8c5fb791efb7119 2024-11-19T12:19:46,578 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/7233b390cac44abfb639e335c6c5a2c3 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/7233b390cac44abfb639e335c6c5a2c3 2024-11-19T12:19:46,578 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/f668696ad6204243baa13c55093c4219 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/B/f668696ad6204243baa13c55093c4219 2024-11-19T12:19:46,580 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/3ad06159926e404f97733c3948547b19 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/3ad06159926e404f97733c3948547b19 2024-11-19T12:19:46,582 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/5f73734271264049a23c5b52a965d5b7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/5f73734271264049a23c5b52a965d5b7 2024-11-19T12:19:46,583 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/b7eb1334cb71453a879c2949b416f59b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/b7eb1334cb71453a879c2949b416f59b 2024-11-19T12:19:46,584 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c6898714bab84ecd9254a918532f76c5 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/C/c6898714bab84ecd9254a918532f76c5 2024-11-19T12:19:46,586 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/recovered.edits/307.seqid to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58/recovered.edits/307.seqid 2024-11-19T12:19:46,586 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,586 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-19T12:19:46,587 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-19T12:19:46,587 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-19T12:19:46,590 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411191fa43808cd514795b9be0336bc7d2a7f_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411191fa43808cd514795b9be0336bc7d2a7f_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,590 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193ce68bab026b4b28b96ec52062629ffe_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193ce68bab026b4b28b96ec52062629ffe_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,591 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193fec18ef0aac4b3fa28a477bbda5653a_c569d7fdb1587d224050261fa2ec2f58 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411193fec18ef0aac4b3fa28a477bbda5653a_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,592 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411194c7282b58754462bb58c3b2369ce6512_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411194c7282b58754462bb58c3b2369ce6512_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,593 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111956f07678c03e4668949701459874e5d3_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111956f07678c03e4668949701459874e5d3_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,594 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119571bb0c539d6476aa288e8d4a37dd23f_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119571bb0c539d6476aa288e8d4a37dd23f_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,595 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411195c6cc251ff374f8a9fd3043aa3ba8ef3_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411195c6cc251ff374f8a9fd3043aa3ba8ef3_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,596 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196907f29308a647c0ab49d6263714aef8_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196907f29308a647c0ab49d6263714aef8_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,596 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196e3c73ee41e141678147eae9de3c3d6c_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196e3c73ee41e141678147eae9de3c3d6c_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,597 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111975cfe01c0e2f4564beda77d9c0f7f15f_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111975cfe01c0e2f4564beda77d9c0f7f15f_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,598 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411197e5bd4ced32045aeba1577848b815d67_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411197e5bd4ced32045aeba1577848b815d67_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,599 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411198b60c96dc9a44b7abc0455f48d18d338_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411198b60c96dc9a44b7abc0455f48d18d338_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,600 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119a0ca8de898234ea999037fb27329e998_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119a0ca8de898234ea999037fb27329e998_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,600 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119b8a27b26b4d1495c9f403966fec9e3ae_c569d7fdb1587d224050261fa2ec2f58 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119b8a27b26b4d1495c9f403966fec9e3ae_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,601 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119b9f45e4867a040cdb32bf7b642d279a1_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119b9f45e4867a040cdb32bf7b642d279a1_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,602 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d44d77be0e5b41d5a588396e7e4f084c_c569d7fdb1587d224050261fa2ec2f58 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d44d77be0e5b41d5a588396e7e4f084c_c569d7fdb1587d224050261fa2ec2f58 2024-11-19T12:19:46,602 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-19T12:19:46,604 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=126, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:46,605 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-19T12:19:46,607 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-19T12:19:46,608 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=126, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:46,608 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-19T12:19:46,608 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732018786608"}]},"ts":"9223372036854775807"} 2024-11-19T12:19:46,609 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-19T12:19:46,609 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c569d7fdb1587d224050261fa2ec2f58, NAME => 'TestAcidGuarantees,,1732018753090.c569d7fdb1587d224050261fa2ec2f58.', STARTKEY => '', ENDKEY => ''}] 2024-11-19T12:19:46,609 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
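The records above show the full teardown path: DisableTableProcedure (pid=122) closes and flushes the region, then DeleteTableProcedure (pid=126) moves every store file and MOB file under .../archive/data/default/TestAcidGuarantees/... before removing the region row and table state from hbase:meta. A minimal client-side sketch of the call sequence that drives this, assuming the standard HBase 2.x Admin API (class name and structure are illustrative, not the test's source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          // A table must be disabled before it can be deleted; both calls block
          // until the corresponding master procedure (DISABLE, then DELETE) completes.
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn);
          }
          admin.deleteTable(tn);
          // Note that the data is not removed outright: as the log shows, the
          // store files are archived under .../archive/... for the cleaner chores.
        }
      }
    }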
2024-11-19T12:19:46,609 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732018786609"}]},"ts":"9223372036854775807"} 2024-11-19T12:19:46,610 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-19T12:19:46,612 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=126, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:46,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 47 msec 2024-11-19T12:19:46,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-19T12:19:46,668 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-19T12:19:46,678 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=237 (was 237), OpenFileDescriptor=448 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=420 (was 454), ProcessCount=11 (was 11), AvailableMemoryMB=2688 (was 2314) - AvailableMemoryMB LEAK? - 2024-11-19T12:19:46,685 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=237, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=420, ProcessCount=11, AvailableMemoryMB=2848 2024-11-19T12:19:46,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
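The TableDescriptorChecker warning above fires because the table descriptor (or "hbase.hregion.memstore.flush.size") is set to 131072 bytes (128 KB), far below the 128 MB default, which forces very frequent flushes. A minimal sketch of how a per-table flush size like this is set, assuming the standard TableDescriptorBuilder API (the class and method layout are illustrative, not taken from the test source):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallFlushSizeDescriptor {
      // Builds a descriptor with a 128 KB per-table flush size, the value that
      // triggers the TableDescriptorChecker warning logged above.
      static TableDescriptor smallFlushDescriptor() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setMemStoreFlushSize(131072L)            // MEMSTORE_FLUSHSIZE = 131072
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("A")))
            .build();
      }
    }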
2024-11-19T12:19:46,687 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:19:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-19T12:19:46,688 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:19:46,688 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:46,688 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 127 2024-11-19T12:19:46,689 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:19:46,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-19T12:19:46,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742274_1450 (size=960) 2024-11-19T12:19:46,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-19T12:19:46,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-19T12:19:47,095 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22 2024-11-19T12:19:47,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742275_1451 (size=53) 2024-11-19T12:19:47,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-19T12:19:47,500 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:19:47,500 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ed69f29417e1710e50942d07ba24647d, disabling compactions & flushes 2024-11-19T12:19:47,501 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:47,501 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:47,501 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. after waiting 0 ms 2024-11-19T12:19:47,501 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:47,501 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
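The create statement logged above declares three column families A, B and C with VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64 KB and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC', which is why each store later opens as a CompactingMemStore with a BASIC compactor. A client-side sketch that builds an equivalent descriptor, assuming the standard HBase 2.x builder API (illustrative, not the actual test code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder tdb = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level attribute visible in the log; it makes each store use
              // an in-memory compacting memstore with the BASIC policy.
              .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
          for (String family : new String[] {"A", "B", "C"}) {
            ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)                 // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                .setBlocksize(65536)               // BLOCKSIZE => 64 KB
                .build();
            tdb.setColumnFamily(cfd);
          }
          admin.createTable(tdb.build()); // blocks until CreateTableProcedure completes
        }
      }
    }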
2024-11-19T12:19:47,501 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:47,501 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:19:47,502 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732018787501"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732018787501"}]},"ts":"1732018787501"} 2024-11-19T12:19:47,503 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-19T12:19:47,503 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:19:47,504 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018787503"}]},"ts":"1732018787503"} 2024-11-19T12:19:47,504 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-19T12:19:47,509 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed69f29417e1710e50942d07ba24647d, ASSIGN}] 2024-11-19T12:19:47,510 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed69f29417e1710e50942d07ba24647d, ASSIGN 2024-11-19T12:19:47,511 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed69f29417e1710e50942d07ba24647d, ASSIGN; state=OFFLINE, location=af314c41f984,36047,1732018661455; forceNewPlan=false, retain=false 2024-11-19T12:19:47,661 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=ed69f29417e1710e50942d07ba24647d, regionState=OPENING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:19:47,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; OpenRegionProcedure ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:19:47,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-19T12:19:47,813 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:47,815 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
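The records above show the master driving the assignment through TransitRegionStateProcedure and OpenRegionProcedure until the region server's AssignRegionHandler opens the region. From the client side the same state can be observed with the Admin and RegionLocator APIs; a minimal sketch, assuming the standard 2.x client (the helper and class names are hypothetical):

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class TableAvailabilityCheck {
      // Hypothetical helper: polls until the master reports the table available,
      // then prints where each region was assigned (a single region in this test).
      static void waitAndPrintLocations(Connection conn, TableName tn) throws Exception {
        try (Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(tn)) {
          while (!admin.isTableAvailable(tn)) {
            Thread.sleep(100);
          }
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc); // prints region name and hosting server
          }
        }
      }
    }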
2024-11-19T12:19:47,815 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7285): Opening region: {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:19:47,816 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:47,816 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:19:47,816 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7327): checking encryption for ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:47,816 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7330): checking classloading for ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:47,817 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:47,818 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:19:47,818 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed69f29417e1710e50942d07ba24647d columnFamilyName A 2024-11-19T12:19:47,818 DEBUG [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:47,819 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] regionserver.HStore(327): Store=ed69f29417e1710e50942d07ba24647d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:19:47,819 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:47,820 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:19:47,820 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed69f29417e1710e50942d07ba24647d columnFamilyName B 2024-11-19T12:19:47,820 DEBUG [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:47,820 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] regionserver.HStore(327): Store=ed69f29417e1710e50942d07ba24647d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:19:47,820 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:47,821 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:19:47,821 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed69f29417e1710e50942d07ba24647d columnFamilyName C 2024-11-19T12:19:47,821 DEBUG [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:19:47,821 INFO [StoreOpener-ed69f29417e1710e50942d07ba24647d-1 {}] regionserver.HStore(327): Store=ed69f29417e1710e50942d07ba24647d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:19:47,822 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:47,822 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:47,822 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:47,823 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:19:47,824 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1085): writing seq id for ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:47,826 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:19:47,826 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1102): Opened ed69f29417e1710e50942d07ba24647d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71339213, jitterRate=0.06303711235523224}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:19:47,827 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1001): Region open journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:47,827 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., pid=129, masterSystemTime=1732018787813 2024-11-19T12:19:47,828 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:47,828 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
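With the region online, the test opens its client connections (note maxRetries=0 in the AbstractRpcClient lines that follow) and starts writing; given the small per-table flush size, the memstore soon exceeds its blocking limit and mutations are rejected with RegionTooBusyException ("Over memstore limit=512.0 K"), as the subsequent records show. A minimal sketch of a single put with an explicit back-off on that exception, assuming the standard client API (the retry loop is illustrative; with default client retry settings this exception is normally retried internally and would not reach the caller):

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutExample {
      // Writes one cell to family A, backing off while the region reports it is
      // over its memstore limit (the flush in progress eventually drains it).
      static void putWithBackoff(Connection conn, byte[] row, byte[] value) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Table table = conn.getTable(tn)) {
          Put put = new Put(row).addColumn(Bytes.toBytes("A"), Bytes.toBytes("q"), value);
          long backoffMs = 50;
          while (true) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              // Memstore above the blocking threshold; wait for the flush and retry.
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }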
2024-11-19T12:19:47,829 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=ed69f29417e1710e50942d07ba24647d, regionState=OPEN, openSeqNum=2, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:19:47,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-19T12:19:47,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; OpenRegionProcedure ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 in 167 msec 2024-11-19T12:19:47,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-11-19T12:19:47,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed69f29417e1710e50942d07ba24647d, ASSIGN in 322 msec 2024-11-19T12:19:47,832 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:19:47,832 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018787832"}]},"ts":"1732018787832"} 2024-11-19T12:19:47,833 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-19T12:19:47,835 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:19:47,836 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-11-19T12:19:48,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-19T12:19:48,792 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 127 completed 2024-11-19T12:19:48,793 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46c37647 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f21f55d 2024-11-19T12:19:48,796 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21f67a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:48,797 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:48,798 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:48,799 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:19:48,800 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33496, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:19:48,802 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2fb24d40 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f69def6 2024-11-19T12:19:48,805 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d5fe744, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:48,806 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51453050 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60eadae0 2024-11-19T12:19:48,808 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@721d647e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:48,808 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x632d1806 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@55a6e359 2024-11-19T12:19:48,810 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c014307, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:48,811 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f99adfe to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d47237f 2024-11-19T12:19:48,813 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b9854ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:48,814 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x562e0db7 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42e690d6 2024-11-19T12:19:48,816 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b72a92d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:48,816 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3abeec20 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44fb119b 2024-11-19T12:19:48,818 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44462a02, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:48,819 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x00df2701 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c349948 2024-11-19T12:19:48,821 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69d7a6f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:48,822 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x35ca71a1 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d0c5089 2024-11-19T12:19:48,824 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5938a7c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:48,825 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x56a4483a to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3943c27f 2024-11-19T12:19:48,828 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25593478, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:48,828 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4a5128bb to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6da65bb4 2024-11-19T12:19:48,830 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@130588c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:19:48,836 DEBUG [hconnection-0x6e2d59c3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:48,836 DEBUG [hconnection-0x6eea4e57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:48,837 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35196, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:48,837 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:48,840 DEBUG [hconnection-0x3ff0583-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:48,841 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35208, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:48,844 DEBUG [hconnection-0x72cedb8a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:48,844 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35224, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:48,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:48,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:19:48,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:19:48,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:48,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:19:48,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:48,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:19:48,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:48,852 DEBUG [hconnection-0x1fe11fbd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:48,853 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35226, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:48,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:48,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:48,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018848858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:48,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018848858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:48,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:48,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018848858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:48,860 DEBUG [hconnection-0x30760e41-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:48,860 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:48,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:48,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018848861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:48,864 DEBUG [hconnection-0x4e4c1219-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:48,864 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:48,871 DEBUG [hconnection-0x2a3a9fa3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:48,872 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35254, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:48,873 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:48,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:48,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35254 deadline: 1732018848873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:48,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-19T12:19:48,874 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:48,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-19T12:19:48,875 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:48,875 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:48,880 DEBUG [hconnection-0x2da02a2f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:48,880 DEBUG [hconnection-0x60f31045-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:19:48,881 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35260, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:48,881 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:19:48,885 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/95efa2d640cd4ae2a7fdaf74285d839c is 50, key is test_row_0/A:col10/1732018788845/Put/seqid=0 2024-11-19T12:19:48,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742276_1452 (size=12001) 2024-11-19T12:19:48,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:48,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018848959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:48,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:48,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018848959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:48,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:48,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018848959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:48,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:48,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018848963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:48,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-19T12:19:48,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:48,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35254 deadline: 1732018848974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,025 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:49,025 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-19T12:19:49,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:49,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,026 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:49,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018849160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018849161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018849161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018849165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-19T12:19:49,177 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:49,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-19T12:19:49,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:49,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,178 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35254 deadline: 1732018849177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,297 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/95efa2d640cd4ae2a7fdaf74285d839c 2024-11-19T12:19:49,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/0c1575c4231d4732a08f9c1f364d2654 is 50, key is test_row_0/B:col10/1732018788845/Put/seqid=0 2024-11-19T12:19:49,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742277_1453 (size=12001) 2024-11-19T12:19:49,330 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:49,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-19T12:19:49,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:49,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:49,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018849463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018849465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018849466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,471 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018849470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-19T12:19:49,482 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:49,482 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-19T12:19:49,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:49,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,483 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:49,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35254 deadline: 1732018849482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,634 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:49,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-19T12:19:49,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:49,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
2024-11-19T12:19:49,635 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/0c1575c4231d4732a08f9c1f364d2654 2024-11-19T12:19:49,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/34bfd0265c7c4ec99aee8466bef18979 is 50, key is test_row_0/C:col10/1732018788845/Put/seqid=0 2024-11-19T12:19:49,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742278_1454 (size=12001) 2024-11-19T12:19:49,787 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:49,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-19T12:19:49,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
as already flushing 2024-11-19T12:19:49,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,787 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,939 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:49,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-19T12:19:49,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:49,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:49,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:49,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018849966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018849972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018849973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018849974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:49,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-19T12:19:49,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:49,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35254 deadline: 1732018849990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:50,091 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:50,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-19T12:19:50,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:50,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:50,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:50,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:50,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:50,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:50,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/34bfd0265c7c4ec99aee8466bef18979 2024-11-19T12:19:50,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/95efa2d640cd4ae2a7fdaf74285d839c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/95efa2d640cd4ae2a7fdaf74285d839c 2024-11-19T12:19:50,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/95efa2d640cd4ae2a7fdaf74285d839c, entries=150, sequenceid=13, filesize=11.7 K 2024-11-19T12:19:50,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/0c1575c4231d4732a08f9c1f364d2654 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/0c1575c4231d4732a08f9c1f364d2654 2024-11-19T12:19:50,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/0c1575c4231d4732a08f9c1f364d2654, entries=150, sequenceid=13, filesize=11.7 K 2024-11-19T12:19:50,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/34bfd0265c7c4ec99aee8466bef18979 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/34bfd0265c7c4ec99aee8466bef18979 2024-11-19T12:19:50,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/34bfd0265c7c4ec99aee8466bef18979, entries=150, sequenceid=13, filesize=11.7 K 2024-11-19T12:19:50,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for ed69f29417e1710e50942d07ba24647d in 1325ms, sequenceid=13, compaction requested=false 2024-11-19T12:19:50,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:50,244 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
af314c41f984,36047,1732018661455 2024-11-19T12:19:50,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-19T12:19:50,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:50,245 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-19T12:19:50,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:19:50,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:50,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:19:50,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:50,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:19:50,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:50,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/282510c956ee4f1d9ceae78d0a067d5b is 50, key is test_row_0/A:col10/1732018788857/Put/seqid=0 2024-11-19T12:19:50,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742279_1455 (size=12001) 2024-11-19T12:19:50,654 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/282510c956ee4f1d9ceae78d0a067d5b 2024-11-19T12:19:50,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/6cc51792fd3649ec82a0a32cb8cca4b8 is 50, key is test_row_0/B:col10/1732018788857/Put/seqid=0 2024-11-19T12:19:50,665 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742280_1456 (size=12001) 2024-11-19T12:19:50,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:50,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:50,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-19T12:19:51,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018850981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35254 deadline: 1732018850999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018851008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018851008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018851008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,066 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/6cc51792fd3649ec82a0a32cb8cca4b8 2024-11-19T12:19:51,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/ede33286641942ff88b510e3587f5f65 is 50, key is test_row_0/C:col10/1732018788857/Put/seqid=0 2024-11-19T12:19:51,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742281_1457 (size=12001) 2024-11-19T12:19:51,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018851108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018851112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018851112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018851113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018851312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018851315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018851315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018851316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,476 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/ede33286641942ff88b510e3587f5f65 2024-11-19T12:19:51,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/282510c956ee4f1d9ceae78d0a067d5b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/282510c956ee4f1d9ceae78d0a067d5b 2024-11-19T12:19:51,483 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/282510c956ee4f1d9ceae78d0a067d5b, entries=150, sequenceid=38, filesize=11.7 K 2024-11-19T12:19:51,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/6cc51792fd3649ec82a0a32cb8cca4b8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6cc51792fd3649ec82a0a32cb8cca4b8 2024-11-19T12:19:51,487 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6cc51792fd3649ec82a0a32cb8cca4b8, entries=150, sequenceid=38, filesize=11.7 K 2024-11-19T12:19:51,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/ede33286641942ff88b510e3587f5f65 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ede33286641942ff88b510e3587f5f65 2024-11-19T12:19:51,490 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ede33286641942ff88b510e3587f5f65, entries=150, sequenceid=38, filesize=11.7 K 2024-11-19T12:19:51,491 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for ed69f29417e1710e50942d07ba24647d in 1246ms, sequenceid=38, compaction requested=false 2024-11-19T12:19:51,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:51,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
2024-11-19T12:19:51,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-19T12:19:51,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-19T12:19:51,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-19T12:19:51,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6170 sec 2024-11-19T12:19:51,494 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.6200 sec 2024-11-19T12:19:51,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:51,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-19T12:19:51,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:19:51,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:51,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:19:51,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:51,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:19:51,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:51,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/1c16e16079724da7a69dce4a68dd3074 is 50, key is test_row_0/A:col10/1732018791622/Put/seqid=0 2024-11-19T12:19:51,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742282_1458 (size=16681) 2024-11-19T12:19:51,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/1c16e16079724da7a69dce4a68dd3074 2024-11-19T12:19:51,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018851643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/4d4efab6ee154fd8ad89b934bfb82cd5 is 50, key is test_row_0/B:col10/1732018791622/Put/seqid=0 2024-11-19T12:19:51,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018851647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742283_1459 (size=12001) 2024-11-19T12:19:51,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/4d4efab6ee154fd8ad89b934bfb82cd5 2024-11-19T12:19:51,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018851648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018851650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/ff64bdff174f40ce8d23f7f4882ee937 is 50, key is test_row_0/C:col10/1732018791622/Put/seqid=0 2024-11-19T12:19:51,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742284_1460 (size=12001) 2024-11-19T12:19:51,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018851750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018851752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018851757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018851757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,796 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-19T12:19:51,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018851954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018851957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018851960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:51,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:51,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018851960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/ff64bdff174f40ce8d23f7f4882ee937 2024-11-19T12:19:52,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/1c16e16079724da7a69dce4a68dd3074 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/1c16e16079724da7a69dce4a68dd3074 2024-11-19T12:19:52,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/1c16e16079724da7a69dce4a68dd3074, entries=250, sequenceid=52, filesize=16.3 K 2024-11-19T12:19:52,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/4d4efab6ee154fd8ad89b934bfb82cd5 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/4d4efab6ee154fd8ad89b934bfb82cd5 2024-11-19T12:19:52,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/4d4efab6ee154fd8ad89b934bfb82cd5, entries=150, sequenceid=52, filesize=11.7 K 2024-11-19T12:19:52,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/ff64bdff174f40ce8d23f7f4882ee937 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ff64bdff174f40ce8d23f7f4882ee937 2024-11-19T12:19:52,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ff64bdff174f40ce8d23f7f4882ee937, entries=150, sequenceid=52, filesize=11.7 K 2024-11-19T12:19:52,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for ed69f29417e1710e50942d07ba24647d in 458ms, sequenceid=52, compaction requested=true 2024-11-19T12:19:52,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:52,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:52,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:52,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:52,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:52,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:52,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:52,079 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:52,079 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:52,080 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:52,080 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:52,080 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/B is initiating minor compaction (all files) 2024-11-19T12:19:52,080 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/A is initiating minor compaction (all files) 2024-11-19T12:19:52,080 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/B in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:52,080 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/A in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:52,080 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/95efa2d640cd4ae2a7fdaf74285d839c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/282510c956ee4f1d9ceae78d0a067d5b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/1c16e16079724da7a69dce4a68dd3074] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=39.7 K 2024-11-19T12:19:52,080 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/0c1575c4231d4732a08f9c1f364d2654, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6cc51792fd3649ec82a0a32cb8cca4b8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/4d4efab6ee154fd8ad89b934bfb82cd5] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=35.2 K 2024-11-19T12:19:52,080 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95efa2d640cd4ae2a7fdaf74285d839c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732018788841 2024-11-19T12:19:52,080 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c1575c4231d4732a08f9c1f364d2654, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732018788841 2024-11-19T12:19:52,081 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 282510c956ee4f1d9ceae78d0a067d5b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732018788856 2024-11-19T12:19:52,081 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 6cc51792fd3649ec82a0a32cb8cca4b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732018788856 2024-11-19T12:19:52,081 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d4efab6ee154fd8ad89b934bfb82cd5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018790982 2024-11-19T12:19:52,081 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c16e16079724da7a69dce4a68dd3074, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018790981 2024-11-19T12:19:52,087 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#B#compaction#387 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:52,087 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#A#compaction#388 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:52,088 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/2daca74201fb48dba58d90bc3ec94fcb is 50, key is test_row_0/B:col10/1732018791622/Put/seqid=0 2024-11-19T12:19:52,088 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/3b90284ffb4e4b84ba08db113df3d4be is 50, key is test_row_0/A:col10/1732018791622/Put/seqid=0 2024-11-19T12:19:52,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742285_1461 (size=12104) 2024-11-19T12:19:52,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742286_1462 (size=12104) 2024-11-19T12:19:52,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:52,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-19T12:19:52,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:19:52,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:52,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:19:52,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:52,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 
2024-11-19T12:19:52,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:52,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/91679aa70a694abfbd8c260211591587 is 50, key is test_row_0/A:col10/1732018792259/Put/seqid=0 2024-11-19T12:19:52,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742287_1463 (size=14341) 2024-11-19T12:19:52,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/91679aa70a694abfbd8c260211591587 2024-11-19T12:19:52,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018852277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018852278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018852279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/03ea615cf8e14752bbf9e644c58008e4 is 50, key is test_row_0/B:col10/1732018792259/Put/seqid=0 2024-11-19T12:19:52,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018852283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742288_1464 (size=12001) 2024-11-19T12:19:52,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018852384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018852385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018852385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018852390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,504 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/2daca74201fb48dba58d90bc3ec94fcb as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/2daca74201fb48dba58d90bc3ec94fcb 2024-11-19T12:19:52,505 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/3b90284ffb4e4b84ba08db113df3d4be as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/3b90284ffb4e4b84ba08db113df3d4be 2024-11-19T12:19:52,507 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed69f29417e1710e50942d07ba24647d/B of ed69f29417e1710e50942d07ba24647d into 2daca74201fb48dba58d90bc3ec94fcb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
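
The repeated "RegionTooBusyException: Over memstore limit=512.0 K" rejections above occur once a region's memstore grows past the flush threshold multiplied by the blocking multiplier. The sketch below shows the two settings involved; the concrete numbers are only an assumption that would reproduce a 512 KB ceiling (128 KB x 4), and the test's real configuration may differ.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the settings behind the memstore blocking limit; assumed values.
public class MemstoreLimitSketch {
  public static Configuration example() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // flush threshold (assumed 128 KB)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x the flush size
    return conf;
  }
}
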
2024-11-19T12:19:52,508 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:52,508 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/B, priority=13, startTime=1732018792079; duration=0sec 2024-11-19T12:19:52,508 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:52,508 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:B 2024-11-19T12:19:52,508 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:52,509 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:52,509 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/C is initiating minor compaction (all files) 2024-11-19T12:19:52,509 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/C in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:52,509 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/34bfd0265c7c4ec99aee8466bef18979, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ede33286641942ff88b510e3587f5f65, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ff64bdff174f40ce8d23f7f4882ee937] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=35.2 K 2024-11-19T12:19:52,509 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 34bfd0265c7c4ec99aee8466bef18979, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732018788841 2024-11-19T12:19:52,510 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting ede33286641942ff88b510e3587f5f65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732018788856 2024-11-19T12:19:52,510 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting ff64bdff174f40ce8d23f7f4882ee937, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018790982 2024-11-19T12:19:52,512 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) 
file(s) in ed69f29417e1710e50942d07ba24647d/A of ed69f29417e1710e50942d07ba24647d into 3b90284ffb4e4b84ba08db113df3d4be(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:52,512 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:52,512 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/A, priority=13, startTime=1732018792079; duration=0sec 2024-11-19T12:19:52,512 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:52,512 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:A 2024-11-19T12:19:52,516 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#C#compaction#391 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:52,517 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/a4517f61515b4fdda6f276dde094475a is 50, key is test_row_0/C:col10/1732018791622/Put/seqid=0 2024-11-19T12:19:52,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742289_1465 (size=12104) 2024-11-19T12:19:52,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018852588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018852588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018852589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018852594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,693 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/03ea615cf8e14752bbf9e644c58008e4 2024-11-19T12:19:52,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/e66144eb0cde4075b694214234682389 is 50, key is test_row_0/C:col10/1732018792259/Put/seqid=0 2024-11-19T12:19:52,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742290_1466 (size=12001) 2024-11-19T12:19:52,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018852894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018852896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018852897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:52,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018852901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:52,924 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/a4517f61515b4fdda6f276dde094475a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/a4517f61515b4fdda6f276dde094475a 2024-11-19T12:19:52,928 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed69f29417e1710e50942d07ba24647d/C of ed69f29417e1710e50942d07ba24647d into a4517f61515b4fdda6f276dde094475a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
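
The Mutate calls being rejected above are single-row puts; judging from HFile keys such as test_row_0/A:col10, test_row_0/B:col10 and test_row_0/C:col10 earlier in the log, each one writes row test_row_0 across the three column families of TestAcidGuarantees. A minimal, hedged client-side equivalent is sketched below (the payload is a placeholder); the client does not surface every rejection directly, since its retrying caller backs off and retries according to hbase.client.retries.number and hbase.client.pause.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Rough equivalent of the rejected single-row puts; not the test's actual code.
public class PutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      byte[] value = Bytes.toBytes("value");                       // placeholder payload
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
      table.put(put);  // retried internally if the region answers RegionTooBusyException
    }
  }
}
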
2024-11-19T12:19:52,928 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:52,928 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/C, priority=13, startTime=1732018792079; duration=0sec 2024-11-19T12:19:52,928 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:52,928 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:C 2024-11-19T12:19:52,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-19T12:19:52,978 INFO [Thread-2024 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-19T12:19:52,979 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:52,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-19T12:19:52,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-19T12:19:52,981 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:52,981 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:52,981 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:53,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35254 deadline: 1732018853025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,027 DEBUG [Thread-2014 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4165 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., hostname=af314c41f984,36047,1732018661455, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:19:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-19T12:19:53,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/e66144eb0cde4075b694214234682389 2024-11-19T12:19:53,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/91679aa70a694abfbd8c260211591587 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/91679aa70a694abfbd8c260211591587 2024-11-19T12:19:53,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/91679aa70a694abfbd8c260211591587, entries=200, sequenceid=75, filesize=14.0 K 2024-11-19T12:19:53,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/03ea615cf8e14752bbf9e644c58008e4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/03ea615cf8e14752bbf9e644c58008e4 2024-11-19T12:19:53,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/03ea615cf8e14752bbf9e644c58008e4, entries=150, sequenceid=75, filesize=11.7 K 2024-11-19T12:19:53,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/e66144eb0cde4075b694214234682389 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/e66144eb0cde4075b694214234682389 2024-11-19T12:19:53,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/e66144eb0cde4075b694214234682389, entries=150, sequenceid=75, filesize=11.7 K 2024-11-19T12:19:53,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ed69f29417e1710e50942d07ba24647d in 858ms, sequenceid=75, compaction requested=false 2024-11-19T12:19:53,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:53,132 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:53,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-19T12:19:53,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
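
The FLUSH procedure records above (FlushTableProcedure pid=132 fanning out to a FlushRegionProcedure subprocedure, pid=133, dispatched to the region server) are the server side of an administrative flush request. A minimal sketch of the corresponding client call follows; it is not the test's exact code, but on this branch the call appears to wait for the master to report the procedure done, which matches the periodic "Checking to see if procedure is done pid=132" lines.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of requesting a table flush from the client side.
public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));  // flush all regions of the table
    }
  }
}
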
2024-11-19T12:19:53,133 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-19T12:19:53,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:19:53,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:53,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:19:53,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:53,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:19:53,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:53,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/e02d5f9e4ab74ae8819b4f53d481f16b is 50, key is test_row_0/A:col10/1732018792276/Put/seqid=0 2024-11-19T12:19:53,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742291_1467 (size=12001) 2024-11-19T12:19:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-19T12:19:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:53,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:53,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018853429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018853430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018853430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018853435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018853536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018853536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018853536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,542 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/e02d5f9e4ab74ae8819b4f53d481f16b 2024-11-19T12:19:53,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018853540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/6d8910c6564d4618b49e96dc60de08cd is 50, key is test_row_0/B:col10/1732018792276/Put/seqid=0 2024-11-19T12:19:53,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742292_1468 (size=12001) 2024-11-19T12:19:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-19T12:19:53,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018853740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018853741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018853742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018853745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:53,952 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/6d8910c6564d4618b49e96dc60de08cd 2024-11-19T12:19:53,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/0e5ae113e0c84252b3a2921a44918ee9 is 50, key is test_row_0/C:col10/1732018792276/Put/seqid=0 2024-11-19T12:19:53,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742293_1469 (size=12001) 2024-11-19T12:19:54,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018854044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018854046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018854047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018854050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-19T12:19:54,363 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/0e5ae113e0c84252b3a2921a44918ee9 2024-11-19T12:19:54,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/e02d5f9e4ab74ae8819b4f53d481f16b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e02d5f9e4ab74ae8819b4f53d481f16b 2024-11-19T12:19:54,370 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e02d5f9e4ab74ae8819b4f53d481f16b, entries=150, sequenceid=91, filesize=11.7 K 2024-11-19T12:19:54,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/6d8910c6564d4618b49e96dc60de08cd as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6d8910c6564d4618b49e96dc60de08cd 2024-11-19T12:19:54,374 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6d8910c6564d4618b49e96dc60de08cd, entries=150, sequenceid=91, filesize=11.7 K 2024-11-19T12:19:54,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/0e5ae113e0c84252b3a2921a44918ee9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/0e5ae113e0c84252b3a2921a44918ee9 2024-11-19T12:19:54,377 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/0e5ae113e0c84252b3a2921a44918ee9, entries=150, sequenceid=91, filesize=11.7 K 2024-11-19T12:19:54,377 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ed69f29417e1710e50942d07ba24647d in 1244ms, sequenceid=91, compaction requested=true 2024-11-19T12:19:54,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:54,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:54,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-19T12:19:54,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-19T12:19:54,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-19T12:19:54,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3970 sec 2024-11-19T12:19:54,380 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.4010 sec 2024-11-19T12:19:54,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:54,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-19T12:19:54,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:19:54,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:54,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:19:54,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:54,552 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:19:54,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:54,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/97230d2e1bf44bf8bfada980397f10d4 is 50, key is test_row_0/A:col10/1732018793434/Put/seqid=0 2024-11-19T12:19:54,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742294_1470 (size=14341) 2024-11-19T12:19:54,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018854560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018854563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018854564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018854565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018854666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018854669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018854669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018854670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018854871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018854874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018854874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:54,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018854875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:54,962 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/97230d2e1bf44bf8bfada980397f10d4 2024-11-19T12:19:54,968 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/65709719b366485c8d3bf65310211a4f is 50, key is test_row_0/B:col10/1732018793434/Put/seqid=0 2024-11-19T12:19:54,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742295_1471 (size=12001) 2024-11-19T12:19:55,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-19T12:19:55,084 INFO [Thread-2024 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-19T12:19:55,085 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:55,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-19T12:19:55,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-19T12:19:55,087 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:55,087 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:55,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-19T12:19:55,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:55,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018855174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:55,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:55,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018855179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:55,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:55,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018855180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:55,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:55,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018855180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:55,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-19T12:19:55,238 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:55,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-19T12:19:55,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:55,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:55,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:55,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:55,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:55,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:55,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/65709719b366485c8d3bf65310211a4f 2024-11-19T12:19:55,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/21bcb99dc72e4be8966f7ae982ebb5d8 is 50, key is test_row_0/C:col10/1732018793434/Put/seqid=0 2024-11-19T12:19:55,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-19T12:19:55,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742296_1472 (size=12001) 2024-11-19T12:19:55,391 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:55,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-19T12:19:55,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:55,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:55,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:55,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:55,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:55,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:55,543 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:55,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-19T12:19:55,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:55,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:55,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:55,544 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:55,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:55,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:55,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018855676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:55,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018855683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:55,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018855684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:55,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:55,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018855685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:55,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-19T12:19:55,696 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:55,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-19T12:19:55,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:55,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:55,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:55,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:19:55,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:55,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:55,790 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/21bcb99dc72e4be8966f7ae982ebb5d8 2024-11-19T12:19:55,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/97230d2e1bf44bf8bfada980397f10d4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/97230d2e1bf44bf8bfada980397f10d4 2024-11-19T12:19:55,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/97230d2e1bf44bf8bfada980397f10d4, entries=200, sequenceid=115, filesize=14.0 K 2024-11-19T12:19:55,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/65709719b366485c8d3bf65310211a4f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/65709719b366485c8d3bf65310211a4f 2024-11-19T12:19:55,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/65709719b366485c8d3bf65310211a4f, entries=150, sequenceid=115, filesize=11.7 K 2024-11-19T12:19:55,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/21bcb99dc72e4be8966f7ae982ebb5d8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/21bcb99dc72e4be8966f7ae982ebb5d8 2024-11-19T12:19:55,803 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/21bcb99dc72e4be8966f7ae982ebb5d8, entries=150, sequenceid=115, filesize=11.7 K 2024-11-19T12:19:55,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ed69f29417e1710e50942d07ba24647d in 1252ms, sequenceid=115, compaction requested=true 2024-11-19T12:19:55,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:55,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
ed69f29417e1710e50942d07ba24647d:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:55,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:55,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:55,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:55,805 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:19:55,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:55,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-19T12:19:55,805 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:19:55,806 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:19:55,806 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:19:55,806 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/B is initiating minor compaction (all files) 2024-11-19T12:19:55,806 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/A is initiating minor compaction (all files) 2024-11-19T12:19:55,806 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/A in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:55,806 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/B in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
2024-11-19T12:19:55,806 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/3b90284ffb4e4b84ba08db113df3d4be, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/91679aa70a694abfbd8c260211591587, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e02d5f9e4ab74ae8819b4f53d481f16b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/97230d2e1bf44bf8bfada980397f10d4] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=51.5 K 2024-11-19T12:19:55,806 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/2daca74201fb48dba58d90bc3ec94fcb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/03ea615cf8e14752bbf9e644c58008e4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6d8910c6564d4618b49e96dc60de08cd, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/65709719b366485c8d3bf65310211a4f] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=47.0 K 2024-11-19T12:19:55,806 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 2daca74201fb48dba58d90bc3ec94fcb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018790982 2024-11-19T12:19:55,806 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b90284ffb4e4b84ba08db113df3d4be, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018790982 2024-11-19T12:19:55,806 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91679aa70a694abfbd8c260211591587, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732018791627 2024-11-19T12:19:55,806 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 03ea615cf8e14752bbf9e644c58008e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732018791640 2024-11-19T12:19:55,807 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d8910c6564d4618b49e96dc60de08cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732018792276 2024-11-19T12:19:55,807 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
e02d5f9e4ab74ae8819b4f53d481f16b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732018792276 2024-11-19T12:19:55,807 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 65709719b366485c8d3bf65310211a4f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732018793410 2024-11-19T12:19:55,807 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97230d2e1bf44bf8bfada980397f10d4, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732018793410 2024-11-19T12:19:55,815 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#B#compaction#399 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:55,815 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#A#compaction#400 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:55,815 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/c974add1f22e41b3b164aa460e1f1df4 is 50, key is test_row_0/B:col10/1732018793434/Put/seqid=0 2024-11-19T12:19:55,815 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/609f065e3c7f4526b45298745ddcd77a is 50, key is test_row_0/A:col10/1732018793434/Put/seqid=0 2024-11-19T12:19:55,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742297_1473 (size=12241) 2024-11-19T12:19:55,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742298_1474 (size=12241) 2024-11-19T12:19:55,848 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:55,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-19T12:19:55,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
2024-11-19T12:19:55,849 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-19T12:19:55,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:19:55,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:55,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:19:55,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:55,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:19:55,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:55,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/f961634d5238402da6a4a3a276ef6473 is 50, key is test_row_0/A:col10/1732018794558/Put/seqid=0 2024-11-19T12:19:55,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742299_1475 (size=12001) 2024-11-19T12:19:56,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-19T12:19:56,224 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/c974add1f22e41b3b164aa460e1f1df4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/c974add1f22e41b3b164aa460e1f1df4 2024-11-19T12:19:56,224 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/609f065e3c7f4526b45298745ddcd77a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/609f065e3c7f4526b45298745ddcd77a 2024-11-19T12:19:56,232 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed69f29417e1710e50942d07ba24647d/B of ed69f29417e1710e50942d07ba24647d into 
c974add1f22e41b3b164aa460e1f1df4(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:56,232 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed69f29417e1710e50942d07ba24647d/A of ed69f29417e1710e50942d07ba24647d into 609f065e3c7f4526b45298745ddcd77a(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:56,232 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:56,232 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:56,232 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/A, priority=12, startTime=1732018795804; duration=0sec 2024-11-19T12:19:56,232 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/B, priority=12, startTime=1732018795804; duration=0sec 2024-11-19T12:19:56,232 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:56,233 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:A 2024-11-19T12:19:56,233 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:19:56,233 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:56,233 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:B 2024-11-19T12:19:56,234 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:19:56,235 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/C is initiating minor compaction (all files) 2024-11-19T12:19:56,235 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/C in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
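The "Committing hdfs://.../.tmp/... as hdfs://.../B/..." entries above show compaction output being written under the region's .tmp directory and then moved into the column-family directory once complete. Below is a small sketch of that write-to-temporary-then-rename pattern using the Hadoop FileSystem API; the directories and file contents are placeholders, not the layout HBase manages internally.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Write a file under a temporary directory, then move it into its final location in one step. */
public class TmpThenCommit {

    public static void commitViaTmp(FileSystem fs, Path tmpDir, Path finalDir,
                                    String fileName, byte[] contents) throws IOException {
        Path tmpFile = new Path(tmpDir, fileName);
        Path finalFile = new Path(finalDir, fileName);

        // 1. Write the full output under the temporary directory so readers never see a partial file.
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write(contents);
        }

        // 2. Move it into the destination directory; on HDFS a rename is a cheap metadata operation.
        fs.mkdirs(finalDir);
        if (!fs.rename(tmpFile, finalFile)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
        }
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder paths; a real deployment would point at the cluster's data directories.
        FileSystem fs = FileSystem.get(conf);
        commitViaTmp(fs,
                new Path("/tmp/demo/.tmp"),
                new Path("/tmp/demo/store"),
                "example-file",
                "hello".getBytes(StandardCharsets.UTF_8));
    }
}
```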
2024-11-19T12:19:56,235 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/a4517f61515b4fdda6f276dde094475a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/e66144eb0cde4075b694214234682389, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/0e5ae113e0c84252b3a2921a44918ee9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/21bcb99dc72e4be8966f7ae982ebb5d8] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=47.0 K 2024-11-19T12:19:56,235 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4517f61515b4fdda6f276dde094475a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732018790982 2024-11-19T12:19:56,235 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting e66144eb0cde4075b694214234682389, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732018791640 2024-11-19T12:19:56,236 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e5ae113e0c84252b3a2921a44918ee9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732018792276 2024-11-19T12:19:56,236 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21bcb99dc72e4be8966f7ae982ebb5d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732018793410 2024-11-19T12:19:56,245 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#C#compaction#402 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:56,245 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/8cf499f2369944a8a52f880e7abb2386 is 50, key is test_row_0/C:col10/1732018793434/Put/seqid=0 2024-11-19T12:19:56,256 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/f961634d5238402da6a4a3a276ef6473 2024-11-19T12:19:56,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/61da67786d504dc1a6599aba2ee33b46 is 50, key is test_row_0/B:col10/1732018794558/Put/seqid=0 2024-11-19T12:19:56,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742300_1476 (size=12241) 2024-11-19T12:19:56,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742301_1477 (size=12001) 2024-11-19T12:19:56,274 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/61da67786d504dc1a6599aba2ee33b46 2024-11-19T12:19:56,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/38d2c76aa5744e3ba3e0d4831fe8f070 is 50, key is test_row_0/C:col10/1732018794558/Put/seqid=0 2024-11-19T12:19:56,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742302_1478 (size=12001) 2024-11-19T12:19:56,676 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/8cf499f2369944a8a52f880e7abb2386 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8cf499f2369944a8a52f880e7abb2386 2024-11-19T12:19:56,679 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed69f29417e1710e50942d07ba24647d/C of ed69f29417e1710e50942d07ba24647d into 8cf499f2369944a8a52f880e7abb2386(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
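The throttle.PressureAwareThroughputController entries report average compaction throughput against a "total limit is 50.00 MB/second", sleeping when writes run ahead of the budget. The toy byte-rate throttle below only illustrates that general idea, not the HBase class itself; the block size and limit are made up for the example.

```java
/** Toy byte-rate throttle: sleep whenever writes run ahead of the configured limit. */
public class SimpleThroughputThrottle {
    private final double bytesPerSecondLimit;
    private final long startNanos = System.nanoTime();
    private long bytesSoFar = 0;

    public SimpleThroughputThrottle(double bytesPerSecondLimit) {
        this.bytesPerSecondLimit = bytesPerSecondLimit;
    }

    /** Record a write of {@code bytes} and sleep if the running rate exceeds the limit. */
    public synchronized void control(long bytes) throws InterruptedException {
        bytesSoFar += bytes;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1_000_000_000.0;
        double earliestAllowedSeconds = bytesSoFar / bytesPerSecondLimit; // time needed at the limit
        double aheadBySeconds = earliestAllowedSeconds - elapsedSeconds;
        if (aheadBySeconds > 0) {
            Thread.sleep((long) (aheadBySeconds * 1000));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // Illustrative 50 MB/s limit, echoing the "total limit is 50.00 MB/second" lines above.
        SimpleThroughputThrottle throttle = new SimpleThroughputThrottle(50.0 * 1024 * 1024);
        for (int i = 0; i < 10; i++) {
            // Pretend each write block is 8 MB.
            throttle.control(8L * 1024 * 1024);
        }
        System.out.println("wrote 80 MB under the throttle");
    }
}
```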
2024-11-19T12:19:56,679 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:56,679 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/C, priority=12, startTime=1732018795805; duration=0sec 2024-11-19T12:19:56,679 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:56,680 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:C 2024-11-19T12:19:56,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:56,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:56,693 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/38d2c76aa5744e3ba3e0d4831fe8f070 2024-11-19T12:19:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/f961634d5238402da6a4a3a276ef6473 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f961634d5238402da6a4a3a276ef6473 2024-11-19T12:19:56,700 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f961634d5238402da6a4a3a276ef6473, entries=150, sequenceid=127, filesize=11.7 K 2024-11-19T12:19:56,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/61da67786d504dc1a6599aba2ee33b46 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/61da67786d504dc1a6599aba2ee33b46 2024-11-19T12:19:56,704 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/61da67786d504dc1a6599aba2ee33b46, entries=150, sequenceid=127, filesize=11.7 
K 2024-11-19T12:19:56,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/38d2c76aa5744e3ba3e0d4831fe8f070 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/38d2c76aa5744e3ba3e0d4831fe8f070 2024-11-19T12:19:56,709 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/38d2c76aa5744e3ba3e0d4831fe8f070, entries=150, sequenceid=127, filesize=11.7 K 2024-11-19T12:19:56,710 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=107.34 KB/109920 for ed69f29417e1710e50942d07ba24647d in 861ms, sequenceid=127, compaction requested=false 2024-11-19T12:19:56,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:56,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:56,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-19T12:19:56,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-19T12:19:56,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-19T12:19:56,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6240 sec 2024-11-19T12:19:56,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:56,713 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-19T12:19:56,713 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.6270 sec 2024-11-19T12:19:56,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:19:56,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:56,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:19:56,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:56,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:19:56,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:56,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/b0a6ffb1da6a44669bc20237be3182f8 is 50, key is test_row_0/A:col10/1732018796713/Put/seqid=0 2024-11-19T12:19:56,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742303_1479 (size=12151) 2024-11-19T12:19:56,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:56,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018856725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:56,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:56,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018856726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:56,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:56,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018856732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:56,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:56,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018856732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:56,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:56,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018856833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:56,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:56,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018856833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:56,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:56,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018856836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:56,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:56,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018856837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018857038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018857039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018857041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018857042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35254 deadline: 1732018857058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,061 DEBUG [Thread-2014 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8199 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., hostname=af314c41f984,36047,1732018661455, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:19:57,129 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/b0a6ffb1da6a44669bc20237be3182f8 2024-11-19T12:19:57,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/09e2e897aeb549bd9bc2a1a665d79b50 is 
50, key is test_row_0/B:col10/1732018796713/Put/seqid=0 2024-11-19T12:19:57,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742304_1480 (size=12151) 2024-11-19T12:19:57,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-19T12:19:57,190 INFO [Thread-2024 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-19T12:19:57,191 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:19:57,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-19T12:19:57,193 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:19:57,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-19T12:19:57,193 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:19:57,193 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:19:57,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-19T12:19:57,344 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:57,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-19T12:19:57,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:57,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:57,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
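[Editor's note] The entries above show a client-requested flush arriving at the master ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") and being turned into a FlushTableProcedure (pid=136) with a FlushRegionProcedure subtask. A minimal sketch of issuing that kind of flush through the public Admin API is shown below; the connection setup is an assumption and only the table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml from the classpath; cluster details are assumed.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Submits a flush request to the master and waits for the resulting procedure,
      // matching the "Operation: FLUSH, Table Name: default:TestAcidGuarantees" line above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}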
2024-11-19T12:19:57,345 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018857344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018857346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018857346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018857347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-19T12:19:57,497 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:57,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-19T12:19:57,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:57,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:57,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:57,498 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
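[Editor's note] The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources(): once a region's memstore exceeds its blocking limit (the configured flush size times the block multiplier), new mutations are rejected until the in-flight flush drains it. Below is a minimal sketch of the two settings involved; the values are assumptions chosen so that their product matches the 512 K limit in the log, since the test's actual configuration is not shown here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a memstore once it reaches this many bytes.
    // 128 KB is an assumed test-sized value; the stock default is 128 MB.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new updates once the memstore grows past flush.size * multiplier.
    // With the assumed values this yields the 512 K blocking limit seen above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}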
2024-11-19T12:19:57,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/09e2e897aeb549bd9bc2a1a665d79b50 2024-11-19T12:19:57,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/5486f0bf34e34fefa467e1b885ebd41c is 50, key is test_row_0/C:col10/1732018796713/Put/seqid=0 2024-11-19T12:19:57,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742305_1481 (size=12151) 2024-11-19T12:19:57,649 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:57,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-19T12:19:57,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:57,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:57,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:57,650 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
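[Editor's note] While the region is blocked, client writes are rejected with RegionTooBusyException, as in the CallRunner entries above. The HBase client normally retries such calls internally (and may surface the failure wrapped in a retries-exhausted exception), so the sketch below only makes that retry explicit for illustration; the row, family, and qualifier follow the test's naming, while the retry budget and backoff are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {   // assumed retry budget
        try {
          table.put(put);
          break;                                          // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its memstore blocking limit; wait for the flush to catch up.
          Thread.sleep(200L * attempt);                   // assumed linear backoff
        }
      }
    }
  }
}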
2024-11-19T12:19:57,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-19T12:19:57,802 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:57,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-19T12:19:57,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:57,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:57,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:57,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018857851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018857851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018857851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018857853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:57,954 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/5486f0bf34e34fefa467e1b885ebd41c 2024-11-19T12:19:57,954 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:57,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-19T12:19:57,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:57,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:57,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:57,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:19:57,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/b0a6ffb1da6a44669bc20237be3182f8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/b0a6ffb1da6a44669bc20237be3182f8 2024-11-19T12:19:57,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/b0a6ffb1da6a44669bc20237be3182f8, entries=150, sequenceid=152, filesize=11.9 K 2024-11-19T12:19:57,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/09e2e897aeb549bd9bc2a1a665d79b50 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/09e2e897aeb549bd9bc2a1a665d79b50 2024-11-19T12:19:57,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/09e2e897aeb549bd9bc2a1a665d79b50, entries=150, sequenceid=152, filesize=11.9 K 2024-11-19T12:19:57,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/5486f0bf34e34fefa467e1b885ebd41c as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/5486f0bf34e34fefa467e1b885ebd41c 2024-11-19T12:19:57,971 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/5486f0bf34e34fefa467e1b885ebd41c, entries=150, sequenceid=152, filesize=11.9 K 2024-11-19T12:19:57,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for ed69f29417e1710e50942d07ba24647d in 1258ms, sequenceid=152, compaction requested=true 2024-11-19T12:19:57,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:57,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:19:57,972 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:57,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:57,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:19:57,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:57,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:19:57,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:57,972 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:57,972 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:57,973 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/A is initiating minor compaction (all files) 2024-11-19T12:19:57,973 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:57,973 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/A in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
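[Editor's note] The "compaction requested=true" and "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines above reflect the store-file thresholds that drive minor compaction selection. The sketch below shows the two relevant settings using the stock defaults, which happen to match the numbers in the log; whether this test overrides them is not visible here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholds {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // A store becomes eligible for minor compaction once it holds at least this many HFiles
    // (three flushed files per family above, hence "3 eligible").
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Writes to the region are throttled once a store accumulates this many HFiles
    // (the "16 blocking" in the selection log line).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
  }
}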
2024-11-19T12:19:57,973 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/B is initiating minor compaction (all files) 2024-11-19T12:19:57,973 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/B in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:57,973 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/609f065e3c7f4526b45298745ddcd77a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f961634d5238402da6a4a3a276ef6473, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/b0a6ffb1da6a44669bc20237be3182f8] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=35.5 K 2024-11-19T12:19:57,973 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/c974add1f22e41b3b164aa460e1f1df4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/61da67786d504dc1a6599aba2ee33b46, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/09e2e897aeb549bd9bc2a1a665d79b50] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=35.5 K 2024-11-19T12:19:57,973 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 609f065e3c7f4526b45298745ddcd77a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732018793410 2024-11-19T12:19:57,973 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c974add1f22e41b3b164aa460e1f1df4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732018793410 2024-11-19T12:19:57,973 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 61da67786d504dc1a6599aba2ee33b46, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732018794558 2024-11-19T12:19:57,973 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting f961634d5238402da6a4a3a276ef6473, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732018794558 2024-11-19T12:19:57,973 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 09e2e897aeb549bd9bc2a1a665d79b50, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732018796707 2024-11-19T12:19:57,974 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting b0a6ffb1da6a44669bc20237be3182f8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732018796707 2024-11-19T12:19:57,980 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#A#compaction#408 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:57,980 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/f54cf25c84b04627965e95d42dd1bc60 is 50, key is test_row_0/A:col10/1732018796713/Put/seqid=0 2024-11-19T12:19:57,982 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#B#compaction#409 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:57,983 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/85798bc90be54ff690dd8f30e06540b0 is 50, key is test_row_0/B:col10/1732018796713/Put/seqid=0 2024-11-19T12:19:57,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742306_1482 (size=12493) 2024-11-19T12:19:57,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742307_1483 (size=12493) 2024-11-19T12:19:57,990 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/85798bc90be54ff690dd8f30e06540b0 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/85798bc90be54ff690dd8f30e06540b0 2024-11-19T12:19:57,994 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed69f29417e1710e50942d07ba24647d/B of ed69f29417e1710e50942d07ba24647d into 85798bc90be54ff690dd8f30e06540b0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
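The compactions in this run are system-requested (queued by MemStoreFlusher.0 and executed by the CompactSplit threads), but the same work can be asked for from a client. A minimal, hedged sketch using the public Admin API, with the table and family names taken from the log and all error handling omitted:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Ask for a (minor) compaction of family B, comparable in effect to the
                // system-requested compaction committed above. The call is asynchronous;
                // the region server's CompactSplit threads perform the merge.
                admin.compact(table, Bytes.toBytes("B"));
                System.out.println("compaction state: " + admin.getCompactionState(table));
            }
        }
    }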
2024-11-19T12:19:57,994 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:57,994 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/B, priority=13, startTime=1732018797972; duration=0sec 2024-11-19T12:19:57,994 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:19:57,994 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:B 2024-11-19T12:19:57,994 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:19:57,996 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:19:57,996 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/C is initiating minor compaction (all files) 2024-11-19T12:19:57,996 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/C in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:19:57,996 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8cf499f2369944a8a52f880e7abb2386, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/38d2c76aa5744e3ba3e0d4831fe8f070, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/5486f0bf34e34fefa467e1b885ebd41c] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=35.5 K 2024-11-19T12:19:57,996 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cf499f2369944a8a52f880e7abb2386, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732018793410 2024-11-19T12:19:57,996 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 38d2c76aa5744e3ba3e0d4831fe8f070, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732018794558 2024-11-19T12:19:57,997 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 5486f0bf34e34fefa467e1b885ebd41c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732018796707 2024-11-19T12:19:58,003 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
ed69f29417e1710e50942d07ba24647d#C#compaction#410 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:19:58,003 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/78602be1a1e34f52b6745b9344716fed is 50, key is test_row_0/C:col10/1732018796713/Put/seqid=0 2024-11-19T12:19:58,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742308_1484 (size=12493) 2024-11-19T12:19:58,010 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/78602be1a1e34f52b6745b9344716fed as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/78602be1a1e34f52b6745b9344716fed 2024-11-19T12:19:58,014 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed69f29417e1710e50942d07ba24647d/C of ed69f29417e1710e50942d07ba24647d into 78602be1a1e34f52b6745b9344716fed(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:19:58,014 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:58,014 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/C, priority=13, startTime=1732018797972; duration=0sec 2024-11-19T12:19:58,014 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:58,014 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:C 2024-11-19T12:19:58,107 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:19:58,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-19T12:19:58,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
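The FlushRegionCallable being executed here (pid=137) is the region-server end of a master-driven flush procedure; from a client, the same flush is a single Admin call. A short sketch, illustrative only and assuming a reachable cluster:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Triggers a flush procedure on the master, which dispatches a
                // FlushRegionCallable (as logged above) to each hosting region server.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }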
2024-11-19T12:19:58,108 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-19T12:19:58,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:19:58,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:58,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:19:58,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:58,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:19:58,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:58,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/4491fd21106c4bf288f3407728efa518 is 50, key is test_row_0/A:col10/1732018796722/Put/seqid=0 2024-11-19T12:19:58,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742309_1485 (size=12151) 2024-11-19T12:19:58,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-19T12:19:58,388 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/f54cf25c84b04627965e95d42dd1bc60 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f54cf25c84b04627965e95d42dd1bc60 2024-11-19T12:19:58,392 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed69f29417e1710e50942d07ba24647d/A of ed69f29417e1710e50942d07ba24647d into f54cf25c84b04627965e95d42dd1bc60(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
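The "Over memstore limit=512.0 K" rejections that follow reflect the region's blocking threshold: roughly the per-region memstore flush size multiplied by the blocking multiplier. The exact values this test configures are not shown in the log, so the combination below is only one plausible way to arrive at 512 K; the property names are the standard HBase keys:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Illustrative, assumed values (much smaller than production defaults);
            // 128 K * 4 = 512 K, matching the limit reported in the warnings below.
            long flushSize = 128L * 1024;   // hbase.hregion.memstore.flush.size, in bytes
            int blockMultiplier = 4;        // hbase.hregion.memstore.block.multiplier
            conf.setLong("hbase.hregion.memstore.flush.size", flushSize);
            conf.setInt("hbase.hregion.memstore.block.multiplier", blockMultiplier);
            // Once a region's memstore exceeds flushSize * blockMultiplier while flushes
            // are still in flight, new mutations are rejected with RegionTooBusyException.
            System.out.println("blocking limit (bytes) = " + flushSize * blockMultiplier);
        }
    }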
2024-11-19T12:19:58,392 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:58,392 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/A, priority=13, startTime=1732018797971; duration=0sec 2024-11-19T12:19:58,392 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:19:58,392 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:A 2024-11-19T12:19:58,516 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/4491fd21106c4bf288f3407728efa518 2024-11-19T12:19:58,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/3db62bb2331a4c01b38efd59748a288e is 50, key is test_row_0/B:col10/1732018796722/Put/seqid=0 2024-11-19T12:19:58,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742310_1486 (size=12151) 2024-11-19T12:19:58,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:19:58,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:58,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:58,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018858885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:58,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:58,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018858886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:58,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:58,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018858887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:58,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:58,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018858890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:58,926 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/3db62bb2331a4c01b38efd59748a288e 2024-11-19T12:19:58,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/d7b0da52851444a892e6dccbe605d44f is 50, key is test_row_0/C:col10/1732018796722/Put/seqid=0 2024-11-19T12:19:58,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742311_1487 (size=12151) 2024-11-19T12:19:58,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:58,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018858991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:58,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:58,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018858991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:58,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:58,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018858994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:58,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:58,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018858994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018859196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018859196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018859199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018859199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-19T12:19:59,337 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/d7b0da52851444a892e6dccbe605d44f 2024-11-19T12:19:59,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/4491fd21106c4bf288f3407728efa518 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/4491fd21106c4bf288f3407728efa518 2024-11-19T12:19:59,344 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/4491fd21106c4bf288f3407728efa518, entries=150, sequenceid=168, filesize=11.9 K 2024-11-19T12:19:59,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/3db62bb2331a4c01b38efd59748a288e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/3db62bb2331a4c01b38efd59748a288e 2024-11-19T12:19:59,348 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/3db62bb2331a4c01b38efd59748a288e, entries=150, sequenceid=168, filesize=11.9 K 2024-11-19T12:19:59,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/d7b0da52851444a892e6dccbe605d44f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/d7b0da52851444a892e6dccbe605d44f 2024-11-19T12:19:59,351 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/d7b0da52851444a892e6dccbe605d44f, entries=150, sequenceid=168, filesize=11.9 K 2024-11-19T12:19:59,352 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for ed69f29417e1710e50942d07ba24647d in 1245ms, sequenceid=168, compaction requested=false 2024-11-19T12:19:59,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:19:59,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
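The repeated RegionTooBusyException warnings above are the server telling writers to back off until the in-progress flush drains the memstore. The stock HBase client typically retries this exception on its own, but a caller that manages retries itself could handle it along these lines; a hedged sketch, with the row, family, and qualifier names borrowed from the test data and the backoff schedule invented purely for illustration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);
                        break;                      // write accepted
                    } catch (RegionTooBusyException e) {
                        // Memstore is over its blocking limit; wait for the flush to
                        // complete and retry with an exponentially longer pause.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }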
2024-11-19T12:19:59,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-19T12:19:59,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-19T12:19:59,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-19T12:19:59,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1600 sec 2024-11-19T12:19:59,355 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 2.1640 sec 2024-11-19T12:19:59,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:19:59,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-19T12:19:59,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:19:59,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:59,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:19:59,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:59,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:19:59,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:19:59,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/d3f83a81a3e74bd8b1cd763efb9427fd is 50, key is test_row_0/A:col10/1732018798889/Put/seqid=0 2024-11-19T12:19:59,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742312_1488 (size=16931) 2024-11-19T12:19:59,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018859515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018859515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018859515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018859519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018859619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018859620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018859620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018859623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018859822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018859822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018859822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:19:59,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018859827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:19:59,913 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/d3f83a81a3e74bd8b1cd763efb9427fd 2024-11-19T12:19:59,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/de5d42e525e148aeb3d0ddfb87a46f35 is 50, key is test_row_0/B:col10/1732018798889/Put/seqid=0 2024-11-19T12:19:59,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742313_1489 (size=12151) 2024-11-19T12:20:00,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018860127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:00,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018860128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:00,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018860128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:00,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018860132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:00,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/de5d42e525e148aeb3d0ddfb87a46f35 2024-11-19T12:20:00,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/191bc43f2a23475482e892604549718b is 50, key is test_row_0/C:col10/1732018798889/Put/seqid=0 2024-11-19T12:20:00,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742314_1490 (size=12151) 2024-11-19T12:20:00,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:00,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018860634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:00,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:00,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018860634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:00,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:00,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018860635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:00,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:00,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018860642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:00,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/191bc43f2a23475482e892604549718b 2024-11-19T12:20:00,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/d3f83a81a3e74bd8b1cd763efb9427fd as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d3f83a81a3e74bd8b1cd763efb9427fd 2024-11-19T12:20:00,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d3f83a81a3e74bd8b1cd763efb9427fd, entries=250, sequenceid=193, filesize=16.5 K 2024-11-19T12:20:00,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/de5d42e525e148aeb3d0ddfb87a46f35 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/de5d42e525e148aeb3d0ddfb87a46f35 2024-11-19T12:20:00,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/de5d42e525e148aeb3d0ddfb87a46f35, entries=150, sequenceid=193, filesize=11.9 K 2024-11-19T12:20:00,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/191bc43f2a23475482e892604549718b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/191bc43f2a23475482e892604549718b 2024-11-19T12:20:00,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/191bc43f2a23475482e892604549718b, entries=150, sequenceid=193, filesize=11.9 K 2024-11-19T12:20:00,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ed69f29417e1710e50942d07ba24647d in 1246ms, sequenceid=193, compaction requested=true 2024-11-19T12:20:00,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:00,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:20:00,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:00,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:20:00,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:00,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:20:00,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:00,749 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:00,749 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:00,750 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41575 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:00,750 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:00,750 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/B is initiating minor compaction (all files) 2024-11-19T12:20:00,750 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/A is initiating minor compaction (all files) 2024-11-19T12:20:00,750 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/B in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
2024-11-19T12:20:00,750 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/A in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:00,750 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f54cf25c84b04627965e95d42dd1bc60, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/4491fd21106c4bf288f3407728efa518, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d3f83a81a3e74bd8b1cd763efb9427fd] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=40.6 K 2024-11-19T12:20:00,750 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/85798bc90be54ff690dd8f30e06540b0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/3db62bb2331a4c01b38efd59748a288e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/de5d42e525e148aeb3d0ddfb87a46f35] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=35.9 K 2024-11-19T12:20:00,750 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 85798bc90be54ff690dd8f30e06540b0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732018796707 2024-11-19T12:20:00,750 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting f54cf25c84b04627965e95d42dd1bc60, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732018796707 2024-11-19T12:20:00,751 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4491fd21106c4bf288f3407728efa518, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732018796722 2024-11-19T12:20:00,751 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 3db62bb2331a4c01b38efd59748a288e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732018796722 2024-11-19T12:20:00,751 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3f83a81a3e74bd8b1cd763efb9427fd, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732018798870 2024-11-19T12:20:00,751 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting de5d42e525e148aeb3d0ddfb87a46f35, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732018798870 
2024-11-19T12:20:00,756 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#B#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:00,757 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/555e94ed4d954085b38c2131e7a2ae84 is 50, key is test_row_0/B:col10/1732018798889/Put/seqid=0 2024-11-19T12:20:00,759 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#A#compaction#418 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:00,759 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/067da070d7544e01b9e78606df7123d6 is 50, key is test_row_0/A:col10/1732018798889/Put/seqid=0 2024-11-19T12:20:00,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742316_1492 (size=12595) 2024-11-19T12:20:00,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742315_1491 (size=12595) 2024-11-19T12:20:01,167 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/067da070d7544e01b9e78606df7123d6 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/067da070d7544e01b9e78606df7123d6 2024-11-19T12:20:01,167 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/555e94ed4d954085b38c2131e7a2ae84 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/555e94ed4d954085b38c2131e7a2ae84 2024-11-19T12:20:01,171 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed69f29417e1710e50942d07ba24647d/A of ed69f29417e1710e50942d07ba24647d into 067da070d7544e01b9e78606df7123d6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:01,171 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed69f29417e1710e50942d07ba24647d/B of ed69f29417e1710e50942d07ba24647d into 555e94ed4d954085b38c2131e7a2ae84(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:01,171 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:01,171 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:01,171 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/A, priority=13, startTime=1732018800749; duration=0sec 2024-11-19T12:20:01,171 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/B, priority=13, startTime=1732018800749; duration=0sec 2024-11-19T12:20:01,171 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:01,171 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:A 2024-11-19T12:20:01,171 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:01,171 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:B 2024-11-19T12:20:01,171 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:01,172 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:01,172 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/C is initiating minor compaction (all files) 2024-11-19T12:20:01,172 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/C in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
2024-11-19T12:20:01,172 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/78602be1a1e34f52b6745b9344716fed, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/d7b0da52851444a892e6dccbe605d44f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/191bc43f2a23475482e892604549718b] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=35.9 K 2024-11-19T12:20:01,172 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78602be1a1e34f52b6745b9344716fed, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732018796707 2024-11-19T12:20:01,173 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7b0da52851444a892e6dccbe605d44f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732018796722 2024-11-19T12:20:01,173 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 191bc43f2a23475482e892604549718b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732018798870 2024-11-19T12:20:01,178 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#C#compaction#419 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:01,179 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/7d64e447d00f42429568bb7e017a1b85 is 50, key is test_row_0/C:col10/1732018798889/Put/seqid=0 2024-11-19T12:20:01,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742317_1493 (size=12595) 2024-11-19T12:20:01,187 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/7d64e447d00f42429568bb7e017a1b85 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/7d64e447d00f42429568bb7e017a1b85 2024-11-19T12:20:01,191 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed69f29417e1710e50942d07ba24647d/C of ed69f29417e1710e50942d07ba24647d into 7d64e447d00f42429568bb7e017a1b85(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:01,191 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:01,191 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/C, priority=13, startTime=1732018800749; duration=0sec 2024-11-19T12:20:01,191 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:01,191 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:C 2024-11-19T12:20:01,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-19T12:20:01,297 INFO [Thread-2024 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-19T12:20:01,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:20:01,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-19T12:20:01,300 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:20:01,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-19T12:20:01,300 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:20:01,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:20:01,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-19T12:20:01,452 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:01,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-19T12:20:01,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
2024-11-19T12:20:01,452 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-19T12:20:01,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:20:01,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:01,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:20:01,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:01,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:20:01,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:01,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/06b104b3ac694c80ac6d76f6132b22d6 is 50, key is test_row_0/A:col10/1732018799518/Put/seqid=0 2024-11-19T12:20:01,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742318_1494 (size=12151) 2024-11-19T12:20:01,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-19T12:20:01,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:01,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:20:01,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018861670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:01,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018861671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:01,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018861672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:01,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018861672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:01,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018861776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:01,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018861776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:01,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018861776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:01,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018861779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:01,861 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/06b104b3ac694c80ac6d76f6132b22d6 2024-11-19T12:20:01,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/b3e54e867ee745369939f6d610e7404f is 50, key is test_row_0/B:col10/1732018799518/Put/seqid=0 2024-11-19T12:20:01,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742319_1495 (size=12151) 2024-11-19T12:20:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-19T12:20:01,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018861978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:01,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018861978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:01,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018861981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:01,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:01,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018861983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,271 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/b3e54e867ee745369939f6d610e7404f 2024-11-19T12:20:02,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018862282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/89f38348649c425e8fa122252d1f61cf is 50, key is test_row_0/C:col10/1732018799518/Put/seqid=0 2024-11-19T12:20:02,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018862284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018862286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018862287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742320_1496 (size=12151) 2024-11-19T12:20:02,293 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/89f38348649c425e8fa122252d1f61cf 2024-11-19T12:20:02,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/06b104b3ac694c80ac6d76f6132b22d6 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/06b104b3ac694c80ac6d76f6132b22d6 2024-11-19T12:20:02,302 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/06b104b3ac694c80ac6d76f6132b22d6, entries=150, sequenceid=208, filesize=11.9 K 2024-11-19T12:20:02,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/b3e54e867ee745369939f6d610e7404f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b3e54e867ee745369939f6d610e7404f 2024-11-19T12:20:02,306 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b3e54e867ee745369939f6d610e7404f, entries=150, sequenceid=208, filesize=11.9 K 2024-11-19T12:20:02,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 
{event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/89f38348649c425e8fa122252d1f61cf as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/89f38348649c425e8fa122252d1f61cf 2024-11-19T12:20:02,309 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/89f38348649c425e8fa122252d1f61cf, entries=150, sequenceid=208, filesize=11.9 K 2024-11-19T12:20:02,310 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for ed69f29417e1710e50942d07ba24647d in 858ms, sequenceid=208, compaction requested=false 2024-11-19T12:20:02,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:02,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:02,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-19T12:20:02,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-19T12:20:02,314 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-19T12:20:02,314 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0110 sec 2024-11-19T12:20:02,316 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.0160 sec 2024-11-19T12:20:02,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-19T12:20:02,403 INFO [Thread-2024 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-19T12:20:02,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:20:02,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-19T12:20:02,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-19T12:20:02,406 INFO 
[PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:20:02,406 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:20:02,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:20:02,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-19T12:20:02,558 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:02,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-19T12:20:02,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:02,558 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-19T12:20:02,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:20:02,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:02,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:20:02,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:02,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:20:02,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:02,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/d1fb03d82022494eb5682b62698dc765 is 50, key is test_row_0/A:col10/1732018801671/Put/seqid=0 2024-11-19T12:20:02,567 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742321_1497 (size=12151) 2024-11-19T12:20:02,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-19T12:20:02,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:20:02,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:02,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018862804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018862804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018862805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018862806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018862910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018862911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018862911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:02,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018862911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:02,967 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/d1fb03d82022494eb5682b62698dc765 2024-11-19T12:20:02,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/29ed8c5b17d947b4800a729620ee0482 is 50, key is test_row_0/B:col10/1732018801671/Put/seqid=0 2024-11-19T12:20:02,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742322_1498 (size=12151) 2024-11-19T12:20:03,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-19T12:20:03,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018863114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:03,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018863114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:03,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018863114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:03,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018863116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:03,386 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/29ed8c5b17d947b4800a729620ee0482 2024-11-19T12:20:03,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/8c51adbbe15347dcb7aa2ee02befb431 is 50, key is test_row_0/C:col10/1732018801671/Put/seqid=0 2024-11-19T12:20:03,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742323_1499 (size=12151) 2024-11-19T12:20:03,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018863418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:03,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018863418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:03,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018863419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:03,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018863421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:03,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-19T12:20:03,796 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/8c51adbbe15347dcb7aa2ee02befb431 2024-11-19T12:20:03,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/d1fb03d82022494eb5682b62698dc765 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d1fb03d82022494eb5682b62698dc765 2024-11-19T12:20:03,803 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d1fb03d82022494eb5682b62698dc765, entries=150, sequenceid=233, filesize=11.9 K 2024-11-19T12:20:03,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/29ed8c5b17d947b4800a729620ee0482 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/29ed8c5b17d947b4800a729620ee0482 2024-11-19T12:20:03,806 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/29ed8c5b17d947b4800a729620ee0482, entries=150, sequenceid=233, filesize=11.9 K 2024-11-19T12:20:03,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/8c51adbbe15347dcb7aa2ee02befb431 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8c51adbbe15347dcb7aa2ee02befb431 2024-11-19T12:20:03,810 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8c51adbbe15347dcb7aa2ee02befb431, entries=150, sequenceid=233, filesize=11.9 K 2024-11-19T12:20:03,810 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ed69f29417e1710e50942d07ba24647d in 1252ms, sequenceid=233, compaction requested=true 2024-11-19T12:20:03,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:03,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:03,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-19T12:20:03,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-19T12:20:03,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-19T12:20:03,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4050 sec 2024-11-19T12:20:03,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.4090 sec 2024-11-19T12:20:03,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:20:03,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-19T12:20:03,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:20:03,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:03,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:20:03,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:03,926 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:20:03,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:03,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/e4a077a2b65f4d58a3bf8f275b3f005d is 50, key is test_row_0/A:col10/1732018802792/Put/seqid=0 2024-11-19T12:20:03,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742324_1500 (size=16931) 2024-11-19T12:20:03,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018863955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:03,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018863962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:03,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018863963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:03,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018863963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018864064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018864068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018864073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018864073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018864268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018864272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018864277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018864279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/e4a077a2b65f4d58a3bf8f275b3f005d 2024-11-19T12:20:04,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/380de839753247548c77c8041e4b03d1 is 50, key is test_row_0/B:col10/1732018802792/Put/seqid=0 2024-11-19T12:20:04,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742325_1501 (size=12151) 2024-11-19T12:20:04,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-19T12:20:04,510 INFO [Thread-2024 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-19T12:20:04,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:20:04,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-19T12:20:04,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-19T12:20:04,512 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:20:04,513 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:20:04,513 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-19T12:20:04,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018864573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018864576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018864583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:04,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018864585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:04,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-19T12:20:04,664 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:04,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-19T12:20:04,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:04,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:04,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:04,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:04,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:04,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:04,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/380de839753247548c77c8041e4b03d1 2024-11-19T12:20:04,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/1d4f6b5a250c4ff1ba8c3d10b3e9e4c0 is 50, key is test_row_0/C:col10/1732018802792/Put/seqid=0 2024-11-19T12:20:04,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742326_1502 (size=12151) 2024-11-19T12:20:04,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-19T12:20:04,816 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:04,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-19T12:20:04,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:04,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:04,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:04,817 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:04,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:04,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:04,969 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:04,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-19T12:20:04,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:04,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:04,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:04,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:04,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:04,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:05,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:05,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018865079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:05,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:05,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018865083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:05,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:05,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018865088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:05,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:05,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018865089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:05,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-19T12:20:05,122 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:05,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-19T12:20:05,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:05,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:05,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:05,122 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:05,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:05,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:05,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/1d4f6b5a250c4ff1ba8c3d10b3e9e4c0 2024-11-19T12:20:05,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/e4a077a2b65f4d58a3bf8f275b3f005d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e4a077a2b65f4d58a3bf8f275b3f005d 2024-11-19T12:20:05,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e4a077a2b65f4d58a3bf8f275b3f005d, entries=250, sequenceid=246, filesize=16.5 K 2024-11-19T12:20:05,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/380de839753247548c77c8041e4b03d1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/380de839753247548c77c8041e4b03d1 2024-11-19T12:20:05,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/380de839753247548c77c8041e4b03d1, entries=150, sequenceid=246, filesize=11.9 K 2024-11-19T12:20:05,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/1d4f6b5a250c4ff1ba8c3d10b3e9e4c0 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/1d4f6b5a250c4ff1ba8c3d10b3e9e4c0 2024-11-19T12:20:05,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/1d4f6b5a250c4ff1ba8c3d10b3e9e4c0, entries=150, sequenceid=246, filesize=11.9 K 2024-11-19T12:20:05,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for ed69f29417e1710e50942d07ba24647d in 1249ms, sequenceid=246, compaction requested=true 2024-11-19T12:20:05,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:05,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
ed69f29417e1710e50942d07ba24647d:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:20:05,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:05,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:20:05,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:05,175 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:20:05,175 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:20:05,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:20:05,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:05,175 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:20:05,175 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53828 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:20:05,176 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/B is initiating minor compaction (all files) 2024-11-19T12:20:05,176 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/A is initiating minor compaction (all files) 2024-11-19T12:20:05,176 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/A in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:05,176 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/B in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
2024-11-19T12:20:05,176 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/555e94ed4d954085b38c2131e7a2ae84, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b3e54e867ee745369939f6d610e7404f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/29ed8c5b17d947b4800a729620ee0482, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/380de839753247548c77c8041e4b03d1] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=47.9 K 2024-11-19T12:20:05,176 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/067da070d7544e01b9e78606df7123d6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/06b104b3ac694c80ac6d76f6132b22d6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d1fb03d82022494eb5682b62698dc765, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e4a077a2b65f4d58a3bf8f275b3f005d] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=52.6 K 2024-11-19T12:20:05,176 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 067da070d7544e01b9e78606df7123d6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732018798870 2024-11-19T12:20:05,176 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 555e94ed4d954085b38c2131e7a2ae84, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732018798870 2024-11-19T12:20:05,176 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06b104b3ac694c80ac6d76f6132b22d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732018799514 2024-11-19T12:20:05,176 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b3e54e867ee745369939f6d610e7404f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732018799514 2024-11-19T12:20:05,176 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1fb03d82022494eb5682b62698dc765, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732018801667 2024-11-19T12:20:05,177 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 
29ed8c5b17d947b4800a729620ee0482, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732018801667 2024-11-19T12:20:05,177 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4a077a2b65f4d58a3bf8f275b3f005d, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018802792 2024-11-19T12:20:05,177 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 380de839753247548c77c8041e4b03d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018802792 2024-11-19T12:20:05,184 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#A#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:05,185 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/e75caa923a64498cbb1801fccc0d9204 is 50, key is test_row_0/A:col10/1732018802792/Put/seqid=0 2024-11-19T12:20:05,185 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#B#compaction#430 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:05,186 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/a179096a6bbc4b4f8cb33d835eb69d04 is 50, key is test_row_0/B:col10/1732018802792/Put/seqid=0 2024-11-19T12:20:05,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742327_1503 (size=12731) 2024-11-19T12:20:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742328_1504 (size=12731) 2024-11-19T12:20:05,274 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:05,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-19T12:20:05,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
2024-11-19T12:20:05,275 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-19T12:20:05,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:20:05,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:05,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:20:05,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:05,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:20:05,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:05,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/ce9691a1463c4b4fac00bfc8a1fb15cc is 50, key is test_row_0/A:col10/1732018803961/Put/seqid=0 2024-11-19T12:20:05,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742329_1505 (size=12301) 2024-11-19T12:20:05,593 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/e75caa923a64498cbb1801fccc0d9204 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e75caa923a64498cbb1801fccc0d9204 2024-11-19T12:20:05,593 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/a179096a6bbc4b4f8cb33d835eb69d04 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/a179096a6bbc4b4f8cb33d835eb69d04 2024-11-19T12:20:05,597 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed69f29417e1710e50942d07ba24647d/B of ed69f29417e1710e50942d07ba24647d into a179096a6bbc4b4f8cb33d835eb69d04(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:05,597 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed69f29417e1710e50942d07ba24647d/A of ed69f29417e1710e50942d07ba24647d into e75caa923a64498cbb1801fccc0d9204(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:05,597 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:05,597 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:05,597 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/A, priority=12, startTime=1732018805174; duration=0sec 2024-11-19T12:20:05,597 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/B, priority=12, startTime=1732018805175; duration=0sec 2024-11-19T12:20:05,597 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:05,597 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:05,597 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:A 2024-11-19T12:20:05,597 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:B 2024-11-19T12:20:05,597 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:20:05,598 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:20:05,598 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/C is initiating minor compaction (all files) 2024-11-19T12:20:05,598 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/C in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
2024-11-19T12:20:05,599 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/7d64e447d00f42429568bb7e017a1b85, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/89f38348649c425e8fa122252d1f61cf, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8c51adbbe15347dcb7aa2ee02befb431, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/1d4f6b5a250c4ff1ba8c3d10b3e9e4c0] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=47.9 K 2024-11-19T12:20:05,599 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d64e447d00f42429568bb7e017a1b85, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732018798870 2024-11-19T12:20:05,599 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89f38348649c425e8fa122252d1f61cf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732018799514 2024-11-19T12:20:05,599 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c51adbbe15347dcb7aa2ee02befb431, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732018801667 2024-11-19T12:20:05,600 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d4f6b5a250c4ff1ba8c3d10b3e9e4c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018802792 2024-11-19T12:20:05,606 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#C#compaction#432 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:05,606 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/d8ab731d6fb74b1192f91e6a62f516a4 is 50, key is test_row_0/C:col10/1732018802792/Put/seqid=0 2024-11-19T12:20:05,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-19T12:20:05,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742330_1506 (size=12731) 2024-11-19T12:20:05,622 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/d8ab731d6fb74b1192f91e6a62f516a4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/d8ab731d6fb74b1192f91e6a62f516a4 2024-11-19T12:20:05,626 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed69f29417e1710e50942d07ba24647d/C of ed69f29417e1710e50942d07ba24647d into d8ab731d6fb74b1192f91e6a62f516a4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:05,626 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:05,626 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/C, priority=12, startTime=1732018805175; duration=0sec 2024-11-19T12:20:05,626 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:05,626 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:C 2024-11-19T12:20:05,691 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/ce9691a1463c4b4fac00bfc8a1fb15cc 2024-11-19T12:20:05,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/9ceec30c2d4d49f885a08febf7ffdd39 is 50, key is test_row_0/B:col10/1732018803961/Put/seqid=0 2024-11-19T12:20:05,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to 
blk_1073742331_1507 (size=12301) 2024-11-19T12:20:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d 2024-11-19T12:20:06,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:06,100 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/9ceec30c2d4d49f885a08febf7ffdd39 2024-11-19T12:20:06,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:06,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018866098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:06,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:06,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018866099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:06,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/a2d894fd30c14ee680219990b8c6aa91 is 50, key is test_row_0/C:col10/1732018803961/Put/seqid=0 2024-11-19T12:20:06,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:06,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018866103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:06,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:06,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018866103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:06,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742332_1508 (size=12301) 2024-11-19T12:20:06,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018866204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018866204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018866209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018866209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018866409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018866410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018866412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018866413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,522 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/a2d894fd30c14ee680219990b8c6aa91
2024-11-19T12:20:06,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/ce9691a1463c4b4fac00bfc8a1fb15cc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/ce9691a1463c4b4fac00bfc8a1fb15cc
2024-11-19T12:20:06,529 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/ce9691a1463c4b4fac00bfc8a1fb15cc, entries=150, sequenceid=270, filesize=12.0 K
2024-11-19T12:20:06,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/9ceec30c2d4d49f885a08febf7ffdd39 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/9ceec30c2d4d49f885a08febf7ffdd39
2024-11-19T12:20:06,532 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/9ceec30c2d4d49f885a08febf7ffdd39, entries=150, sequenceid=270, filesize=12.0 K
2024-11-19T12:20:06,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/a2d894fd30c14ee680219990b8c6aa91 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/a2d894fd30c14ee680219990b8c6aa91
2024-11-19T12:20:06,536 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/a2d894fd30c14ee680219990b8c6aa91, entries=150, sequenceid=270, filesize=12.0 K
2024-11-19T12:20:06,537 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ed69f29417e1710e50942d07ba24647d in 1263ms, sequenceid=270, compaction requested=false
2024-11-19T12:20:06,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d:
2024-11-19T12:20:06,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.
2024-11-19T12:20:06,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143
2024-11-19T12:20:06,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=143
2024-11-19T12:20:06,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142
2024-11-19T12:20:06,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0270 sec
2024-11-19T12:20:06,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 2.0380 sec
2024-11-19T12:20:06,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142
2024-11-19T12:20:06,615 INFO [Thread-2024 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed
2024-11-19T12:20:06,616 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-19T12:20:06,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees
2024-11-19T12:20:06,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-11-19T12:20:06,618 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-19T12:20:06,618 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T12:20:06,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T12:20:06,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-11-19T12:20:06,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on ed69f29417e1710e50942d07ba24647d
2024-11-19T12:20:06,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB
2024-11-19T12:20:06,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A
2024-11-19T12:20:06,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:20:06,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B
2024-11-19T12:20:06,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:20:06,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C
2024-11-19T12:20:06,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-19T12:20:06,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/692471a7a18e494098aaf466d6633f01 is 50, key is test_row_0/A:col10/1732018806723/Put/seqid=0
2024-11-19T12:20:06,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742333_1509 (size=14741)
2024-11-19T12:20:06,770 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455
2024-11-19T12:20:06,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145
2024-11-19T12:20:06,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.
2024-11-19T12:20:06,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing
2024-11-19T12:20:06,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.
2024-11-19T12:20:06,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:20:06,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:20:06,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=145
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:20:06,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018866765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018866766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018866769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018866770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018866874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018866874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018866874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-19T12:20:06,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018866878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
2024-11-19T12:20:06,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-11-19T12:20:06,922 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455
2024-11-19T12:20:06,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145
2024-11-19T12:20:06,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.
2024-11-19T12:20:06,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing
2024-11-19T12:20:06,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.
2024-11-19T12:20:06,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:20:06,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:20:06,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=145
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:20:07,074 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455
2024-11-19T12:20:07,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145
2024-11-19T12:20:07,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.
2024-11-19T12:20:07,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing
2024-11-19T12:20:07,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.
2024-11-19T12:20:07,075 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:20:07,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:20:07,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=145
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:20:07,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018867081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018867082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018867082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018867083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35254 deadline: 1732018867140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,142 DEBUG [Thread-2014 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18279 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., hostname=af314c41f984,36047,1732018661455, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:20:07,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/692471a7a18e494098aaf466d6633f01 2024-11-19T12:20:07,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/b5e18f19fc0541c4a9c599c106d2ba71 is 50, key is test_row_0/B:col10/1732018806723/Put/seqid=0 2024-11-19T12:20:07,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742334_1510 (size=12301) 2024-11-19T12:20:07,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-19T12:20:07,226 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:07,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-19T12:20:07,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:07,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:07,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,379 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:07,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-19T12:20:07,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:07,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018867385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018867386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018867386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018867388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,531 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:07,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-19T12:20:07,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:07,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:07,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/b5e18f19fc0541c4a9c599c106d2ba71 2024-11-19T12:20:07,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/c6b7667bbe11418d9266fd464c3962c6 is 50, key is test_row_0/C:col10/1732018806723/Put/seqid=0 2024-11-19T12:20:07,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742335_1511 (size=12301) 2024-11-19T12:20:07,684 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:07,684 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-19T12:20:07,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:07,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,684 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:07,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-19T12:20:07,836 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:07,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-19T12:20:07,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:07,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35200 deadline: 1732018867892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35226 deadline: 1732018867892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35224 deadline: 1732018867894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:07,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35240 deadline: 1732018867894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:07,981 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/c6b7667bbe11418d9266fd464c3962c6 2024-11-19T12:20:07,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/692471a7a18e494098aaf466d6633f01 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/692471a7a18e494098aaf466d6633f01 2024-11-19T12:20:07,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/692471a7a18e494098aaf466d6633f01, entries=200, sequenceid=289, filesize=14.4 K 2024-11-19T12:20:07,989 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:07,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-19T12:20:07,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/b5e18f19fc0541c4a9c599c106d2ba71 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b5e18f19fc0541c4a9c599c106d2ba71 2024-11-19T12:20:07,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. as already flushing 2024-11-19T12:20:07,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,989 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:07,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
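Note on the RegionTooBusyException warnings above: HRegion.checkResources rejects new writes once the region's memstore passes its blocking limit (reported here as 512.0 K, typically the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier), and the rejected mutations are expected to be retried once the in-flight flush frees memory. A minimal client-side sketch of that backoff, assuming a standard HBase 2.x client; the class name, retry count and backoff values are illustrative assumptions, and in practice the HBase client already retries this exception internally before it ever reaches application code.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnBusyRegion {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The 512.0 K blocking limit in the log is flush size times block multiplier;
        // the test presumably lowers the flush size to force frequent flushes.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                 // illustrative starting backoff
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                   // the client retries RegionTooBusyException internally first
              break;                            // write accepted
            } catch (IOException busyOrExhausted) {
              // Surfaced only after the client's own retries give up; back off and try again.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }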
2024-11-19T12:20:07,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
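The repeated pid=145 failures above are benign retries: FlushRegionCallable throws the "Unable to complete flush" IOException whenever the region reports it is already flushing, the region server reports that back, and the master re-dispatches the sub-procedure until the in-flight flush drains (pid=145 does succeed further down in this log). From a client's point of view the whole exchange sits behind one Admin call; a minimal sketch, assuming an HBase 2.x Admin and the table name used by this test (the class name is a placeholder).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a flush-table procedure on the master (pid=144/146 in this log) and,
          // as the HBaseAdmin$TableFuture lines show, waits for the procedure to complete.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }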
2024-11-19T12:20:07,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b5e18f19fc0541c4a9c599c106d2ba71, entries=150, sequenceid=289, filesize=12.0 K 2024-11-19T12:20:07,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/c6b7667bbe11418d9266fd464c3962c6 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/c6b7667bbe11418d9266fd464c3962c6 2024-11-19T12:20:07,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/c6b7667bbe11418d9266fd464c3962c6, entries=150, sequenceid=289, filesize=12.0 K 2024-11-19T12:20:07,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for ed69f29417e1710e50942d07ba24647d in 1255ms, sequenceid=289, compaction requested=true 2024-11-19T12:20:07,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:07,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:20:07,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:07,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:20:07,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:07,997 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:07,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed69f29417e1710e50942d07ba24647d:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:20:07,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:07,997 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:07,998 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:07,998 DEBUG 
[RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/B is initiating minor compaction (all files) 2024-11-19T12:20:07,998 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/B in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:07,998 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/a179096a6bbc4b4f8cb33d835eb69d04, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/9ceec30c2d4d49f885a08febf7ffdd39, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b5e18f19fc0541c4a9c599c106d2ba71] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=36.5 K 2024-11-19T12:20:07,998 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:07,998 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/A is initiating minor compaction (all files) 2024-11-19T12:20:07,998 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/A in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
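The selection lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", "Exploring compaction algorithm has selected 3 files ...") are ExploringCompactionPolicy choosing a minor compaction once a store has accumulated enough eligible HFiles. A short sketch of the standard knobs that drive that choice, shown with their usual defaults for illustration only; nothing in this log says the test overrides them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minimum eligible files before a minor compaction
        conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files compacted in one pass
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // a file is selected if its size fits the ratio check
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the selection lines
      }
    }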
2024-11-19T12:20:07,998 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e75caa923a64498cbb1801fccc0d9204, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/ce9691a1463c4b4fac00bfc8a1fb15cc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/692471a7a18e494098aaf466d6633f01] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=38.8 K 2024-11-19T12:20:07,998 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting a179096a6bbc4b4f8cb33d835eb69d04, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018802792 2024-11-19T12:20:07,999 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting e75caa923a64498cbb1801fccc0d9204, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018802792 2024-11-19T12:20:07,999 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ceec30c2d4d49f885a08febf7ffdd39, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732018803942 2024-11-19T12:20:07,999 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce9691a1463c4b4fac00bfc8a1fb15cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732018803942 2024-11-19T12:20:07,999 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b5e18f19fc0541c4a9c599c106d2ba71, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732018806716 2024-11-19T12:20:07,999 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 692471a7a18e494098aaf466d6633f01, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732018806102 2024-11-19T12:20:08,004 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#A#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:08,005 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/c4808811e73043a495d01b8c5e8a1656 is 50, key is test_row_0/A:col10/1732018806723/Put/seqid=0 2024-11-19T12:20:08,005 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#B#compaction#439 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:08,005 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/fc34ada116e94564af3b9c61edbc7019 is 50, key is test_row_0/B:col10/1732018806723/Put/seqid=0 2024-11-19T12:20:08,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742336_1512 (size=12983) 2024-11-19T12:20:08,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742337_1513 (size=12983) 2024-11-19T12:20:08,141 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:08,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-19T12:20:08,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:08,142 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-19T12:20:08,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:20:08,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:08,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:20:08,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:08,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:20:08,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:08,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/f796e57aa80e4050af0dba33d1e86e02 is 50, key is test_row_0/A:col10/1732018806761/Put/seqid=0 2024-11-19T12:20:08,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742338_1514 
(size=12301) 2024-11-19T12:20:08,150 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/f796e57aa80e4050af0dba33d1e86e02 2024-11-19T12:20:08,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/5f73776f3886422bb7658fe7868ac191 is 50, key is test_row_0/B:col10/1732018806761/Put/seqid=0 2024-11-19T12:20:08,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742339_1515 (size=12301) 2024-11-19T12:20:08,167 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/5f73776f3886422bb7658fe7868ac191 2024-11-19T12:20:08,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/9e1613334aa142788fd94907924a5ffd is 50, key is test_row_0/C:col10/1732018806761/Put/seqid=0 2024-11-19T12:20:08,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742340_1516 (size=12301) 2024-11-19T12:20:08,176 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/9e1613334aa142788fd94907924a5ffd 2024-11-19T12:20:08,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/f796e57aa80e4050af0dba33d1e86e02 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f796e57aa80e4050af0dba33d1e86e02 2024-11-19T12:20:08,182 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f796e57aa80e4050af0dba33d1e86e02, entries=150, sequenceid=310, filesize=12.0 K 2024-11-19T12:20:08,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/5f73776f3886422bb7658fe7868ac191 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/5f73776f3886422bb7658fe7868ac191 2024-11-19T12:20:08,185 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/5f73776f3886422bb7658fe7868ac191, entries=150, sequenceid=310, filesize=12.0 K 2024-11-19T12:20:08,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/9e1613334aa142788fd94907924a5ffd as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/9e1613334aa142788fd94907924a5ffd 2024-11-19T12:20:08,188 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/9e1613334aa142788fd94907924a5ffd, entries=150, sequenceid=310, filesize=12.0 K 2024-11-19T12:20:08,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-19T12:20:08,189 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=0 B/0 for ed69f29417e1710e50942d07ba24647d in 47ms, sequenceid=310, compaction requested=true 2024-11-19T12:20:08,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:08,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
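The "CompactingMemStore ... FLUSHING TO DISK" and "CompactionPipeline ... Swapping pipeline suffix" lines show that the table's column families use the compacting (in-memory compaction) memstore; each flush writes a .tmp HFile and then commits it into the store directory, which is what the HRegionFileSystem "Committing" lines above record. A sketch of how such a table could be declared, assuming the HBase 2.x descriptor builders; the BASIC policy and the create call are illustrative assumptions, since the log only shows that a CompactingMemStore is in use.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateCompactingMemstoreTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          for (String family : new String[] {"A", "B", "C"}) {      // the three families flushed in this log
            table.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC) // CompactingMemStore instead of DefaultMemStore
                .build());
          }
          admin.createTable(table.build());
        }
      }
    }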
2024-11-19T12:20:08,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-19T12:20:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-19T12:20:08,192 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-19T12:20:08,192 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5720 sec 2024-11-19T12:20:08,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.5770 sec 2024-11-19T12:20:08,426 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/c4808811e73043a495d01b8c5e8a1656 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/c4808811e73043a495d01b8c5e8a1656 2024-11-19T12:20:08,429 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed69f29417e1710e50942d07ba24647d/A of ed69f29417e1710e50942d07ba24647d into c4808811e73043a495d01b8c5e8a1656(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
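The compaction of store A above was queued by MemStoreFlusher.0 after the flush; compactions can also be requested on demand through the Admin API. A minimal sketch, assuming an HBase 2.x Admin; choosing between the minor and major variants here is purely illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.compact(table);      // queue a minor compaction request for every region of the table
          admin.majorCompact(table); // or rewrite all store files per store in a major compaction
        }
      }
    }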
2024-11-19T12:20:08,429 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:08,429 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/A, priority=13, startTime=1732018807997; duration=0sec 2024-11-19T12:20:08,429 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:08,429 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:A 2024-11-19T12:20:08,429 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:20:08,430 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:20:08,430 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): ed69f29417e1710e50942d07ba24647d/C is initiating minor compaction (all files) 2024-11-19T12:20:08,430 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed69f29417e1710e50942d07ba24647d/C in TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:08,430 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/d8ab731d6fb74b1192f91e6a62f516a4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/a2d894fd30c14ee680219990b8c6aa91, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/c6b7667bbe11418d9266fd464c3962c6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/9e1613334aa142788fd94907924a5ffd] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp, totalSize=48.5 K 2024-11-19T12:20:08,431 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8ab731d6fb74b1192f91e6a62f516a4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732018802792 2024-11-19T12:20:08,431 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2d894fd30c14ee680219990b8c6aa91, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732018803942 2024-11-19T12:20:08,431 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6b7667bbe11418d9266fd464c3962c6, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732018806716 2024-11-19T12:20:08,432 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e1613334aa142788fd94907924a5ffd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732018806761 2024-11-19T12:20:08,434 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/fc34ada116e94564af3b9c61edbc7019 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/fc34ada116e94564af3b9c61edbc7019 2024-11-19T12:20:08,446 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed69f29417e1710e50942d07ba24647d/B of ed69f29417e1710e50942d07ba24647d into fc34ada116e94564af3b9c61edbc7019(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:08,446 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:08,446 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/B, priority=13, startTime=1732018807997; duration=0sec 2024-11-19T12:20:08,446 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:08,446 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:B 2024-11-19T12:20:08,446 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed69f29417e1710e50942d07ba24647d#C#compaction#443 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:08,447 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/8e36f1e48d4d4b6897c13be18dd33f14 is 50, key is test_row_0/C:col10/1732018806761/Put/seqid=0 2024-11-19T12:20:08,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742341_1517 (size=13017) 2024-11-19T12:20:08,454 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/8e36f1e48d4d4b6897c13be18dd33f14 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8e36f1e48d4d4b6897c13be18dd33f14 2024-11-19T12:20:08,459 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed69f29417e1710e50942d07ba24647d/C of ed69f29417e1710e50942d07ba24647d into 8e36f1e48d4d4b6897c13be18dd33f14(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:08,459 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:08,459 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d., storeName=ed69f29417e1710e50942d07ba24647d/C, priority=12, startTime=1732018807997; duration=0sec 2024-11-19T12:20:08,459 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:08,459 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed69f29417e1710e50942d07ba24647d:C 2024-11-19T12:20:08,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-19T12:20:08,722 INFO [Thread-2024 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-19T12:20:08,723 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:20:08,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-19T12:20:08,724 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:20:08,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=146 2024-11-19T12:20:08,725 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:20:08,725 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:20:08,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-19T12:20:08,872 DEBUG [Thread-2033 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4a5128bb to 127.0.0.1:64186 2024-11-19T12:20:08,872 DEBUG [Thread-2033 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:08,873 DEBUG [Thread-2031 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x56a4483a to 127.0.0.1:64186 2024-11-19T12:20:08,873 DEBUG [Thread-2031 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:08,873 DEBUG [Thread-2025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3abeec20 to 127.0.0.1:64186 2024-11-19T12:20:08,873 DEBUG [Thread-2025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:08,873 DEBUG [Thread-2027 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x00df2701 to 127.0.0.1:64186 2024-11-19T12:20:08,874 DEBUG [Thread-2027 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:08,874 DEBUG [Thread-2029 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x35ca71a1 to 127.0.0.1:64186 2024-11-19T12:20:08,874 DEBUG [Thread-2029 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:08,876 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:08,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-19T12:20:08,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:08,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:08,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
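The run of "Close zookeeper connection ... Stopping rpc client" pairs above appears to be each worker thread of the test tool closing its own client Connection as the run winds down; closing a Connection tears down its ZooKeeper client and its RPC client, which is exactly the pair of DEBUG lines logged per thread. A minimal sketch of the same lifecycle, assuming a standard 2.x client and a placeholder class name.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ConnectionLifecycle {
      public static void main(String[] args) throws Exception {
        // try-with-resources closes the Table, then the Connection, which in turn
        // shuts down its ZooKeeper client and RPC client (the DEBUG pairs in the log).
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // ... reads and writes ...
        }
      }
    }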
2024-11-19T12:20:08,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-19T12:20:08,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-19T12:20:08,879 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-19T12:20:08,879 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 153 msec 2024-11-19T12:20:08,880 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 156 msec 2024-11-19T12:20:08,901 DEBUG [Thread-2016 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51453050 to 127.0.0.1:64186 2024-11-19T12:20:08,901 DEBUG [Thread-2016 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:08,903 DEBUG [Thread-2022 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x562e0db7 to 127.0.0.1:64186 2024-11-19T12:20:08,903 DEBUG [Thread-2022 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:08,904 DEBUG [Thread-2018 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x632d1806 to 127.0.0.1:64186 2024-11-19T12:20:08,904 DEBUG [Thread-2018 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:08,907 DEBUG [Thread-2020 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f99adfe to 127.0.0.1:64186 2024-11-19T12:20:08,907 DEBUG [Thread-2020 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:09,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-19T12:20:09,026 INFO [Thread-2024 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-19T12:20:09,928 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T12:20:17,157 DEBUG [Thread-2014 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2fb24d40 to 127.0.0.1:64186 2024-11-19T12:20:17,157 DEBUG [Thread-2014 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:17,157 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-19T12:20:17,157 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1 2024-11-19T12:20:17,157 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-11-19T12:20:17,157 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-11-19T12:20:17,157 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-11-19T12:20:17,157 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-11-19T12:20:17,157 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-19T12:20:17,157 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-19T12:20:17,158 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3230 2024-11-19T12:20:17,158 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9690 rows 2024-11-19T12:20:17,158 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3221 2024-11-19T12:20:17,158 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9663 rows 2024-11-19T12:20:17,158 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3234 2024-11-19T12:20:17,158 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9702 rows 2024-11-19T12:20:17,158 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3224 2024-11-19T12:20:17,158 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9672 rows 2024-11-19T12:20:17,158 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3234 2024-11-19T12:20:17,158 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9702 rows 2024-11-19T12:20:17,158 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-19T12:20:17,158 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46c37647 to 127.0.0.1:64186 2024-11-19T12:20:17,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:17,162 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-19T12:20:17,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-19T12:20:17,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:17,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-19T12:20:17,166 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018817165"}]},"ts":"1732018817165"} 2024-11-19T12:20:17,166 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-19T12:20:17,168 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-19T12:20:17,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-19T12:20:17,170 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed69f29417e1710e50942d07ba24647d, UNASSIGN}] 2024-11-19T12:20:17,170 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed69f29417e1710e50942d07ba24647d, UNASSIGN 2024-11-19T12:20:17,170 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=ed69f29417e1710e50942d07ba24647d, regionState=CLOSING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:20:17,171 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-19T12:20:17,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; CloseRegionProcedure ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:20:17,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-19T12:20:17,322 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:17,322 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(124): Close ed69f29417e1710e50942d07ba24647d 2024-11-19T12:20:17,323 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-19T12:20:17,323 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1681): Closing ed69f29417e1710e50942d07ba24647d, disabling compactions & flushes 2024-11-19T12:20:17,323 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:17,323 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 2024-11-19T12:20:17,323 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. after waiting 0 ms 2024-11-19T12:20:17,323 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
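The tail of the log is the cleanup path: the client disables TestAcidGuarantees, the master stores DisableTableProcedure (pid=148), marks the table DISABLING in hbase:meta, and fans out CloseTableRegionsProcedure, TransitRegionStateProcedure (UNASSIGN) and CloseRegionProcedure, after which the region server closes ed69f29417e1710e50942d07ba24647d following one last flush. The equivalent client calls, sketched under the assumption of an HBase 2.x Admin; the deleteTable step is an assumption about the eventual cleanup, as this excerpt only shows the disable.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(table)) {
            admin.disableTable(table); // drives the DisableTableProcedure / region close seen in the log
            admin.deleteTable(table);  // assumed follow-up cleanup; not shown in this excerpt
          }
        }
      }
    }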
2024-11-19T12:20:17,323 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2837): Flushing ed69f29417e1710e50942d07ba24647d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-19T12:20:17,323 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=A 2024-11-19T12:20:17,323 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:17,323 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=B 2024-11-19T12:20:17,323 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:17,323 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed69f29417e1710e50942d07ba24647d, store=C 2024-11-19T12:20:17,323 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:17,326 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/188f34162c48459caad8f8209bb98d60 is 50, key is test_row_0/A:col10/1732018817156/Put/seqid=0 2024-11-19T12:20:17,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742342_1518 (size=9857) 2024-11-19T12:20:17,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-19T12:20:17,730 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/188f34162c48459caad8f8209bb98d60 2024-11-19T12:20:17,735 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/afd169d66b734d6bb03104b9cbed40a9 is 50, key is test_row_0/B:col10/1732018817156/Put/seqid=0 2024-11-19T12:20:17,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742343_1519 (size=9857) 2024-11-19T12:20:17,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-19T12:20:18,139 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 
{event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/afd169d66b734d6bb03104b9cbed40a9 2024-11-19T12:20:18,144 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/22a44d0db1ad4e5d813955070cb7fdc8 is 50, key is test_row_0/C:col10/1732018817156/Put/seqid=0 2024-11-19T12:20:18,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742344_1520 (size=9857) 2024-11-19T12:20:18,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-19T12:20:18,547 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/22a44d0db1ad4e5d813955070cb7fdc8 2024-11-19T12:20:18,550 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/A/188f34162c48459caad8f8209bb98d60 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/188f34162c48459caad8f8209bb98d60 2024-11-19T12:20:18,553 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/188f34162c48459caad8f8209bb98d60, entries=100, sequenceid=322, filesize=9.6 K 2024-11-19T12:20:18,553 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/B/afd169d66b734d6bb03104b9cbed40a9 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/afd169d66b734d6bb03104b9cbed40a9 2024-11-19T12:20:18,556 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/afd169d66b734d6bb03104b9cbed40a9, entries=100, sequenceid=322, filesize=9.6 K 2024-11-19T12:20:18,556 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/.tmp/C/22a44d0db1ad4e5d813955070cb7fdc8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/22a44d0db1ad4e5d813955070cb7fdc8 2024-11-19T12:20:18,558 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/22a44d0db1ad4e5d813955070cb7fdc8, entries=100, sequenceid=322, filesize=9.6 K 2024-11-19T12:20:18,559 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for ed69f29417e1710e50942d07ba24647d in 1236ms, sequenceid=322, compaction requested=true 2024-11-19T12:20:18,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/95efa2d640cd4ae2a7fdaf74285d839c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/282510c956ee4f1d9ceae78d0a067d5b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/1c16e16079724da7a69dce4a68dd3074, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/3b90284ffb4e4b84ba08db113df3d4be, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/91679aa70a694abfbd8c260211591587, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e02d5f9e4ab74ae8819b4f53d481f16b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/97230d2e1bf44bf8bfada980397f10d4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/609f065e3c7f4526b45298745ddcd77a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f961634d5238402da6a4a3a276ef6473, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f54cf25c84b04627965e95d42dd1bc60, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/b0a6ffb1da6a44669bc20237be3182f8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/4491fd21106c4bf288f3407728efa518, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d3f83a81a3e74bd8b1cd763efb9427fd, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/067da070d7544e01b9e78606df7123d6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/06b104b3ac694c80ac6d76f6132b22d6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d1fb03d82022494eb5682b62698dc765, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e4a077a2b65f4d58a3bf8f275b3f005d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e75caa923a64498cbb1801fccc0d9204, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/ce9691a1463c4b4fac00bfc8a1fb15cc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/692471a7a18e494098aaf466d6633f01] to archive 2024-11-19T12:20:18,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T12:20:18,561 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/95efa2d640cd4ae2a7fdaf74285d839c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/95efa2d640cd4ae2a7fdaf74285d839c 2024-11-19T12:20:18,562 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/282510c956ee4f1d9ceae78d0a067d5b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/282510c956ee4f1d9ceae78d0a067d5b 2024-11-19T12:20:18,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/1c16e16079724da7a69dce4a68dd3074 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/1c16e16079724da7a69dce4a68dd3074 2024-11-19T12:20:18,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/3b90284ffb4e4b84ba08db113df3d4be to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/3b90284ffb4e4b84ba08db113df3d4be 2024-11-19T12:20:18,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/91679aa70a694abfbd8c260211591587 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/91679aa70a694abfbd8c260211591587 2024-11-19T12:20:18,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e02d5f9e4ab74ae8819b4f53d481f16b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e02d5f9e4ab74ae8819b4f53d481f16b 2024-11-19T12:20:18,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/97230d2e1bf44bf8bfada980397f10d4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/97230d2e1bf44bf8bfada980397f10d4 2024-11-19T12:20:18,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/609f065e3c7f4526b45298745ddcd77a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/609f065e3c7f4526b45298745ddcd77a 2024-11-19T12:20:18,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f961634d5238402da6a4a3a276ef6473 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f961634d5238402da6a4a3a276ef6473 2024-11-19T12:20:18,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f54cf25c84b04627965e95d42dd1bc60 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f54cf25c84b04627965e95d42dd1bc60 2024-11-19T12:20:18,569 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/b0a6ffb1da6a44669bc20237be3182f8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/b0a6ffb1da6a44669bc20237be3182f8 2024-11-19T12:20:18,570 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/4491fd21106c4bf288f3407728efa518 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/4491fd21106c4bf288f3407728efa518 2024-11-19T12:20:18,571 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d3f83a81a3e74bd8b1cd763efb9427fd to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d3f83a81a3e74bd8b1cd763efb9427fd 2024-11-19T12:20:18,572 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/067da070d7544e01b9e78606df7123d6 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/067da070d7544e01b9e78606df7123d6 2024-11-19T12:20:18,572 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/06b104b3ac694c80ac6d76f6132b22d6 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/06b104b3ac694c80ac6d76f6132b22d6 2024-11-19T12:20:18,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d1fb03d82022494eb5682b62698dc765 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/d1fb03d82022494eb5682b62698dc765 2024-11-19T12:20:18,574 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e4a077a2b65f4d58a3bf8f275b3f005d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e4a077a2b65f4d58a3bf8f275b3f005d 2024-11-19T12:20:18,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e75caa923a64498cbb1801fccc0d9204 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/e75caa923a64498cbb1801fccc0d9204 2024-11-19T12:20:18,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/ce9691a1463c4b4fac00bfc8a1fb15cc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/ce9691a1463c4b4fac00bfc8a1fb15cc 2024-11-19T12:20:18,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/692471a7a18e494098aaf466d6633f01 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/692471a7a18e494098aaf466d6633f01 2024-11-19T12:20:18,577 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/0c1575c4231d4732a08f9c1f364d2654, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6cc51792fd3649ec82a0a32cb8cca4b8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/2daca74201fb48dba58d90bc3ec94fcb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/4d4efab6ee154fd8ad89b934bfb82cd5, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/03ea615cf8e14752bbf9e644c58008e4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6d8910c6564d4618b49e96dc60de08cd, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/c974add1f22e41b3b164aa460e1f1df4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/65709719b366485c8d3bf65310211a4f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/61da67786d504dc1a6599aba2ee33b46, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/85798bc90be54ff690dd8f30e06540b0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/09e2e897aeb549bd9bc2a1a665d79b50, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/3db62bb2331a4c01b38efd59748a288e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/555e94ed4d954085b38c2131e7a2ae84, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/de5d42e525e148aeb3d0ddfb87a46f35, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b3e54e867ee745369939f6d610e7404f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/29ed8c5b17d947b4800a729620ee0482, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/a179096a6bbc4b4f8cb33d835eb69d04, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/380de839753247548c77c8041e4b03d1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/9ceec30c2d4d49f885a08febf7ffdd39, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b5e18f19fc0541c4a9c599c106d2ba71] to archive 2024-11-19T12:20:18,578 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T12:20:18,579 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/0c1575c4231d4732a08f9c1f364d2654 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/0c1575c4231d4732a08f9c1f364d2654 2024-11-19T12:20:18,579 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6cc51792fd3649ec82a0a32cb8cca4b8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6cc51792fd3649ec82a0a32cb8cca4b8 2024-11-19T12:20:18,580 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/2daca74201fb48dba58d90bc3ec94fcb to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/2daca74201fb48dba58d90bc3ec94fcb 2024-11-19T12:20:18,581 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/4d4efab6ee154fd8ad89b934bfb82cd5 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/4d4efab6ee154fd8ad89b934bfb82cd5 2024-11-19T12:20:18,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/03ea615cf8e14752bbf9e644c58008e4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/03ea615cf8e14752bbf9e644c58008e4 2024-11-19T12:20:18,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6d8910c6564d4618b49e96dc60de08cd to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/6d8910c6564d4618b49e96dc60de08cd 2024-11-19T12:20:18,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/c974add1f22e41b3b164aa460e1f1df4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/c974add1f22e41b3b164aa460e1f1df4 2024-11-19T12:20:18,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/65709719b366485c8d3bf65310211a4f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/65709719b366485c8d3bf65310211a4f 2024-11-19T12:20:18,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/61da67786d504dc1a6599aba2ee33b46 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/61da67786d504dc1a6599aba2ee33b46 2024-11-19T12:20:18,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/85798bc90be54ff690dd8f30e06540b0 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/85798bc90be54ff690dd8f30e06540b0 2024-11-19T12:20:18,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/09e2e897aeb549bd9bc2a1a665d79b50 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/09e2e897aeb549bd9bc2a1a665d79b50 2024-11-19T12:20:18,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/3db62bb2331a4c01b38efd59748a288e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/3db62bb2331a4c01b38efd59748a288e 2024-11-19T12:20:18,588 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/555e94ed4d954085b38c2131e7a2ae84 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/555e94ed4d954085b38c2131e7a2ae84 2024-11-19T12:20:18,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/de5d42e525e148aeb3d0ddfb87a46f35 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/de5d42e525e148aeb3d0ddfb87a46f35 2024-11-19T12:20:18,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b3e54e867ee745369939f6d610e7404f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b3e54e867ee745369939f6d610e7404f 2024-11-19T12:20:18,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/29ed8c5b17d947b4800a729620ee0482 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/29ed8c5b17d947b4800a729620ee0482 2024-11-19T12:20:18,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/a179096a6bbc4b4f8cb33d835eb69d04 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/a179096a6bbc4b4f8cb33d835eb69d04 2024-11-19T12:20:18,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/380de839753247548c77c8041e4b03d1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/380de839753247548c77c8041e4b03d1 2024-11-19T12:20:18,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/9ceec30c2d4d49f885a08febf7ffdd39 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/9ceec30c2d4d49f885a08febf7ffdd39 2024-11-19T12:20:18,594 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b5e18f19fc0541c4a9c599c106d2ba71 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/b5e18f19fc0541c4a9c599c106d2ba71 2024-11-19T12:20:18,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/34bfd0265c7c4ec99aee8466bef18979, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ede33286641942ff88b510e3587f5f65, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/a4517f61515b4fdda6f276dde094475a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ff64bdff174f40ce8d23f7f4882ee937, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/e66144eb0cde4075b694214234682389, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/0e5ae113e0c84252b3a2921a44918ee9, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8cf499f2369944a8a52f880e7abb2386, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/21bcb99dc72e4be8966f7ae982ebb5d8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/38d2c76aa5744e3ba3e0d4831fe8f070, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/78602be1a1e34f52b6745b9344716fed, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/5486f0bf34e34fefa467e1b885ebd41c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/d7b0da52851444a892e6dccbe605d44f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/7d64e447d00f42429568bb7e017a1b85, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/191bc43f2a23475482e892604549718b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/89f38348649c425e8fa122252d1f61cf, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8c51adbbe15347dcb7aa2ee02befb431, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/d8ab731d6fb74b1192f91e6a62f516a4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/1d4f6b5a250c4ff1ba8c3d10b3e9e4c0, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/a2d894fd30c14ee680219990b8c6aa91, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/c6b7667bbe11418d9266fd464c3962c6, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/9e1613334aa142788fd94907924a5ffd] to archive 2024-11-19T12:20:18,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T12:20:18,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/34bfd0265c7c4ec99aee8466bef18979 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/34bfd0265c7c4ec99aee8466bef18979 2024-11-19T12:20:18,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ede33286641942ff88b510e3587f5f65 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ede33286641942ff88b510e3587f5f65 2024-11-19T12:20:18,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/a4517f61515b4fdda6f276dde094475a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/a4517f61515b4fdda6f276dde094475a 2024-11-19T12:20:18,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ff64bdff174f40ce8d23f7f4882ee937 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/ff64bdff174f40ce8d23f7f4882ee937 2024-11-19T12:20:18,599 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/e66144eb0cde4075b694214234682389 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/e66144eb0cde4075b694214234682389 2024-11-19T12:20:18,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/0e5ae113e0c84252b3a2921a44918ee9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/0e5ae113e0c84252b3a2921a44918ee9 2024-11-19T12:20:18,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8cf499f2369944a8a52f880e7abb2386 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8cf499f2369944a8a52f880e7abb2386 2024-11-19T12:20:18,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/21bcb99dc72e4be8966f7ae982ebb5d8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/21bcb99dc72e4be8966f7ae982ebb5d8 2024-11-19T12:20:18,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/38d2c76aa5744e3ba3e0d4831fe8f070 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/38d2c76aa5744e3ba3e0d4831fe8f070 2024-11-19T12:20:18,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/78602be1a1e34f52b6745b9344716fed to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/78602be1a1e34f52b6745b9344716fed 2024-11-19T12:20:18,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/5486f0bf34e34fefa467e1b885ebd41c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/5486f0bf34e34fefa467e1b885ebd41c 2024-11-19T12:20:18,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/d7b0da52851444a892e6dccbe605d44f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/d7b0da52851444a892e6dccbe605d44f 2024-11-19T12:20:18,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/7d64e447d00f42429568bb7e017a1b85 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/7d64e447d00f42429568bb7e017a1b85 2024-11-19T12:20:18,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/191bc43f2a23475482e892604549718b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/191bc43f2a23475482e892604549718b 2024-11-19T12:20:18,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/89f38348649c425e8fa122252d1f61cf to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/89f38348649c425e8fa122252d1f61cf 2024-11-19T12:20:18,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8c51adbbe15347dcb7aa2ee02befb431 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8c51adbbe15347dcb7aa2ee02befb431 2024-11-19T12:20:18,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/d8ab731d6fb74b1192f91e6a62f516a4 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/d8ab731d6fb74b1192f91e6a62f516a4 2024-11-19T12:20:18,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/1d4f6b5a250c4ff1ba8c3d10b3e9e4c0 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/1d4f6b5a250c4ff1ba8c3d10b3e9e4c0 2024-11-19T12:20:18,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/a2d894fd30c14ee680219990b8c6aa91 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/a2d894fd30c14ee680219990b8c6aa91 2024-11-19T12:20:18,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/c6b7667bbe11418d9266fd464c3962c6 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/c6b7667bbe11418d9266fd464c3962c6 2024-11-19T12:20:18,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/9e1613334aa142788fd94907924a5ffd to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/9e1613334aa142788fd94907924a5ffd 2024-11-19T12:20:18,615 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/recovered.edits/325.seqid, newMaxSeqId=325, maxSeqId=1 2024-11-19T12:20:18,616 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d. 
2024-11-19T12:20:18,616 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1635): Region close journal for ed69f29417e1710e50942d07ba24647d: 2024-11-19T12:20:18,617 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(170): Closed ed69f29417e1710e50942d07ba24647d 2024-11-19T12:20:18,617 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=ed69f29417e1710e50942d07ba24647d, regionState=CLOSED 2024-11-19T12:20:18,619 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-19T12:20:18,619 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseRegionProcedure ed69f29417e1710e50942d07ba24647d, server=af314c41f984,36047,1732018661455 in 1.4470 sec 2024-11-19T12:20:18,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-11-19T12:20:18,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed69f29417e1710e50942d07ba24647d, UNASSIGN in 1.4490 sec 2024-11-19T12:20:18,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-19T12:20:18,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4510 sec 2024-11-19T12:20:18,622 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018818622"}]},"ts":"1732018818622"} 2024-11-19T12:20:18,622 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-19T12:20:18,624 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-19T12:20:18,625 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4610 sec 2024-11-19T12:20:19,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-19T12:20:19,269 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-19T12:20:19,269 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-19T12:20:19,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:19,270 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=152, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:19,271 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=152, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:19,271 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-19T12:20:19,272 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d 2024-11-19T12:20:19,273 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/recovered.edits] 2024-11-19T12:20:19,276 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/188f34162c48459caad8f8209bb98d60 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/188f34162c48459caad8f8209bb98d60 2024-11-19T12:20:19,276 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/c4808811e73043a495d01b8c5e8a1656 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/c4808811e73043a495d01b8c5e8a1656 2024-11-19T12:20:19,277 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f796e57aa80e4050af0dba33d1e86e02 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/A/f796e57aa80e4050af0dba33d1e86e02 2024-11-19T12:20:19,279 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/5f73776f3886422bb7658fe7868ac191 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/5f73776f3886422bb7658fe7868ac191 2024-11-19T12:20:19,280 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/afd169d66b734d6bb03104b9cbed40a9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/afd169d66b734d6bb03104b9cbed40a9 
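Editor's note: the DISABLE (pid=148) and DELETE (pid=152) procedures being archived here are driven by ordinary client Admin calls. A hedged sketch of the equivalent calls against the standard HBase 2.x Admin API (not the test's own code; the configuration source is whatever hbase-site.xml is on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(tn)) {
                if (!admin.isTableDisabled(tn)) {
                    admin.disableTable(tn);   // DisableTableProcedure, as with pid=148 above
                }
                admin.deleteTable(tn);        // DeleteTableProcedure, as with pid=152 above
            }
        }
    }
}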
2024-11-19T12:20:19,280 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/fc34ada116e94564af3b9c61edbc7019 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/B/fc34ada116e94564af3b9c61edbc7019 2024-11-19T12:20:19,282 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/22a44d0db1ad4e5d813955070cb7fdc8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/22a44d0db1ad4e5d813955070cb7fdc8 2024-11-19T12:20:19,283 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8e36f1e48d4d4b6897c13be18dd33f14 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/C/8e36f1e48d4d4b6897c13be18dd33f14 2024-11-19T12:20:19,284 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/recovered.edits/325.seqid to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d/recovered.edits/325.seqid 2024-11-19T12:20:19,285 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/ed69f29417e1710e50942d07ba24647d 2024-11-19T12:20:19,285 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-19T12:20:19,286 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=152, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:19,288 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-19T12:20:19,289 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-19T12:20:19,290 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=152, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:19,290 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-19T12:20:19,290 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732018819290"}]},"ts":"9223372036854775807"} 2024-11-19T12:20:19,291 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-19T12:20:19,291 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ed69f29417e1710e50942d07ba24647d, NAME => 'TestAcidGuarantees,,1732018786686.ed69f29417e1710e50942d07ba24647d.', STARTKEY => '', ENDKEY => ''}] 2024-11-19T12:20:19,291 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-19T12:20:19,291 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732018819291"}]},"ts":"9223372036854775807"} 2024-11-19T12:20:19,293 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-19T12:20:19,294 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=152, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:19,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 25 msec 2024-11-19T12:20:19,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-19T12:20:19,372 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-11-19T12:20:19,380 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=237 (was 237), OpenFileDescriptor=443 (was 448), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=329 (was 420), ProcessCount=9 (was 11), AvailableMemoryMB=3222 (was 2848) - AvailableMemoryMB LEAK? - 2024-11-19T12:20:19,388 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=237, OpenFileDescriptor=443, MaxFileDescriptor=1048576, SystemLoadAverage=329, ProcessCount=9, AvailableMemoryMB=3222 2024-11-19T12:20:19,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
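Editor's note: the TableDescriptorChecker warning at the end of the run above fires because the test sets MEMSTORE_FLUSHSIZE to 131072 bytes (128 KB), far below the usual 128 MB default, which guarantees very frequent flushes. A per-table flush size is set on the table descriptor; a hedged sketch with the standard 2.x builder API (the value shown is only the one the checker warns about, not a recommendation):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushSizeSketch {
    public static void main(String[] args) {
        // 128 KB, the value the checker flags; production tables normally keep the 128 MB default.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
            .setMemStoreFlushSize(128 * 1024L)
            .build();
        System.out.println(desc.getMemStoreFlushSize()); // 131072
    }
}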
2024-11-19T12:20:19,389 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:20:19,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:19,390 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:20:19,390 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:19,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 153 2024-11-19T12:20:19,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-19T12:20:19,391 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:20:19,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742345_1521 (size=960) 2024-11-19T12:20:19,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-19T12:20:19,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-19T12:20:19,797 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22 2024-11-19T12:20:19,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742346_1522 (size=53) 2024-11-19T12:20:19,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-19T12:20:20,202 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:20:20,202 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7d5f97e10ca89010b1b0ccd9ef5577c9, disabling compactions & flushes 2024-11-19T12:20:20,202 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:20,202 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:20,202 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. after waiting 0 ms 2024-11-19T12:20:20,202 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:20,202 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
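Editor's note: the create request logged at 12:20:19,389 carries three column families (A, B, C), one version each, 64 KB blocks, and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A sketch of building an equivalent descriptor with the HBase 2.x client API; this is my reconstruction from the logged attributes, not the test's code, and it sets the in-memory compaction policy per family rather than via the raw table attribute:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    static ColumnFamilyDescriptor family(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setMaxVersions(1)                                   // VERSIONS => '1'
            .setBlocksize(64 * 1024)                             // BLOCKSIZE => 64 KB
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC) // BASIC compacting memstore
            .build();
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.createTable(TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(family("A"))
                .setColumnFamily(family("B"))
                .setColumnFamily(family("C"))
                .build()); // CreateTableProcedure, as with pid=153 above
        }
    }
}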
2024-11-19T12:20:20,202 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:20,203 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:20:20,203 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732018820203"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732018820203"}]},"ts":"1732018820203"} 2024-11-19T12:20:20,204 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-19T12:20:20,205 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:20:20,205 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018820205"}]},"ts":"1732018820205"} 2024-11-19T12:20:20,205 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-19T12:20:20,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7d5f97e10ca89010b1b0ccd9ef5577c9, ASSIGN}] 2024-11-19T12:20:20,209 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7d5f97e10ca89010b1b0ccd9ef5577c9, ASSIGN 2024-11-19T12:20:20,209 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7d5f97e10ca89010b1b0ccd9ef5577c9, ASSIGN; state=OFFLINE, location=af314c41f984,36047,1732018661455; forceNewPlan=false, retain=false 2024-11-19T12:20:20,360 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=7d5f97e10ca89010b1b0ccd9ef5577c9, regionState=OPENING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:20:20,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; OpenRegionProcedure 7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:20:20,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-19T12:20:20,512 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:20,514 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
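Editor's note: the ASSIGN subprocedures above (pid=154/155) place the table's single region on af314c41f984,36047. From the client side, assignment can be checked once the create call returns; a small hedged sketch using the standard RegionLocator API (illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(tn)) {
            // One entry per region; for this test table there is a single region spanning ''..''.
            for (HRegionLocation loc : locator.getAllRegionLocations()) {
                System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
            }
        }
    }
}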
2024-11-19T12:20:20,514 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(7285): Opening region: {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:20:20,515 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:20,515 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:20:20,515 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(7327): checking encryption for 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:20,515 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(7330): checking classloading for 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:20,516 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:20,517 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:20:20,517 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d5f97e10ca89010b1b0ccd9ef5577c9 columnFamilyName A 2024-11-19T12:20:20,517 DEBUG [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:20,517 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(327): Store=7d5f97e10ca89010b1b0ccd9ef5577c9/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:20:20,517 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:20,518 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:20:20,518 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d5f97e10ca89010b1b0ccd9ef5577c9 columnFamilyName B 2024-11-19T12:20:20,518 DEBUG [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:20,519 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(327): Store=7d5f97e10ca89010b1b0ccd9ef5577c9/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:20:20,519 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:20,520 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:20:20,520 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d5f97e10ca89010b1b0ccd9ef5577c9 columnFamilyName C 2024-11-19T12:20:20,520 DEBUG [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:20,520 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(327): Store=7d5f97e10ca89010b1b0ccd9ef5577c9/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:20:20,520 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:20,521 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:20,521 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:20,522 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:20:20,523 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1085): writing seq id for 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:20,524 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:20:20,525 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1102): Opened 7d5f97e10ca89010b1b0ccd9ef5577c9; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75231631, jitterRate=0.12103866040706635}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:20:20,525 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1001): Region open journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:20,526 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., pid=155, masterSystemTime=1732018820512 2024-11-19T12:20:20,527 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:20,527 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
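Editor's note: the CompactionConfiguration lines above echo the store-level compaction tuning in effect (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2). These correspond to the standard hbase.hstore.compaction.* configuration keys; a hedged sketch of overriding them programmatically, with the values shown being just the defaults the log reports:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);      // minFilesToCompact in the log line
        conf.setInt("hbase.hstore.compaction.max", 10);     // maxFilesToCompact in the log line
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
    }
}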
2024-11-19T12:20:20,528 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=7d5f97e10ca89010b1b0ccd9ef5577c9, regionState=OPEN, openSeqNum=2, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:20:20,529 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-19T12:20:20,529 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; OpenRegionProcedure 7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 in 167 msec 2024-11-19T12:20:20,530 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-19T12:20:20,530 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7d5f97e10ca89010b1b0ccd9ef5577c9, ASSIGN in 321 msec 2024-11-19T12:20:20,531 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:20:20,531 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018820531"}]},"ts":"1732018820531"} 2024-11-19T12:20:20,532 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-19T12:20:20,534 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:20:20,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1450 sec 2024-11-19T12:20:21,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-19T12:20:21,495 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-11-19T12:20:21,496 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4bbf3c1c to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65aca2ac 2024-11-19T12:20:21,501 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c0f5004, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:20:21,503 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:20:21,504 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48936, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:20:21,504 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:20:21,505 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51910, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:20:21,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-19T12:20:21,507 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:20:21,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:21,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742347_1523 (size=996) 2024-11-19T12:20:21,916 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-19T12:20:21,916 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-19T12:20:21,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-19T12:20:21,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7d5f97e10ca89010b1b0ccd9ef5577c9, REOPEN/MOVE}] 2024-11-19T12:20:21,919 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7d5f97e10ca89010b1b0ccd9ef5577c9, REOPEN/MOVE 2024-11-19T12:20:21,920 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=7d5f97e10ca89010b1b0ccd9ef5577c9, regionState=CLOSING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:20:21,921 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-19T12:20:21,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; CloseRegionProcedure 7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:20:22,072 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:22,072 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,072 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-19T12:20:22,072 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing 7d5f97e10ca89010b1b0ccd9ef5577c9, disabling compactions & flushes 2024-11-19T12:20:22,072 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:22,072 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:22,072 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. after waiting 0 ms 2024-11-19T12:20:22,072 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
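Editor's note: the modify request at 12:20:21,507 turns family A into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is what forces the region reopen seen here. The test issues a full modifyTable; a hedged sketch of the narrower per-family call that makes the same change with the standard 2.x Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Rebuild family A from its current descriptor, then flip it to MOB storage.
            ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
                .newBuilder(admin.getDescriptor(tn).getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(4L)   // MOB_THRESHOLD => '4' (bytes)
                .build();
            admin.modifyColumnFamily(tn, mobA); // triggers the ReopenTableRegionsProcedure logged above
        }
    }
}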
2024-11-19T12:20:22,075 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-19T12:20:22,075 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:22,075 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:22,075 WARN [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegionServer(3786): Not adding moved region record: 7d5f97e10ca89010b1b0ccd9ef5577c9 to self. 2024-11-19T12:20:22,076 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,077 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=7d5f97e10ca89010b1b0ccd9ef5577c9, regionState=CLOSED 2024-11-19T12:20:22,078 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-11-19T12:20:22,078 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseRegionProcedure 7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 in 156 msec 2024-11-19T12:20:22,079 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7d5f97e10ca89010b1b0ccd9ef5577c9, REOPEN/MOVE; state=CLOSED, location=af314c41f984,36047,1732018661455; forceNewPlan=false, retain=true 2024-11-19T12:20:22,229 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=7d5f97e10ca89010b1b0ccd9ef5577c9, regionState=OPENING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=158, state=RUNNABLE; OpenRegionProcedure 7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:20:22,381 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:22,383 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:22,383 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7285): Opening region: {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:20:22,384 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,384 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:20:22,384 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7327): checking encryption for 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,384 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7330): checking classloading for 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,385 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,385 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:20:22,386 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d5f97e10ca89010b1b0ccd9ef5577c9 columnFamilyName A 2024-11-19T12:20:22,386 DEBUG [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:22,387 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(327): Store=7d5f97e10ca89010b1b0ccd9ef5577c9/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:20:22,387 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,387 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:20:22,387 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d5f97e10ca89010b1b0ccd9ef5577c9 columnFamilyName B 2024-11-19T12:20:22,387 DEBUG [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:22,388 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(327): Store=7d5f97e10ca89010b1b0ccd9ef5577c9/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:20:22,388 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,388 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-19T12:20:22,388 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d5f97e10ca89010b1b0ccd9ef5577c9 columnFamilyName C 2024-11-19T12:20:22,388 DEBUG [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:22,388 INFO [StoreOpener-7d5f97e10ca89010b1b0ccd9ef5577c9-1 {}] regionserver.HStore(327): Store=7d5f97e10ca89010b1b0ccd9ef5577c9/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:20:22,389 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:22,389 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,390 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,391 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:20:22,392 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1085): writing seq id for 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,392 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1102): Opened 7d5f97e10ca89010b1b0ccd9ef5577c9; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65304338, jitterRate=-0.026889532804489136}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:20:22,393 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1001): Region open journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:22,393 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., pid=160, masterSystemTime=1732018822381 2024-11-19T12:20:22,394 DEBUG [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:22,395 INFO [RS_OPEN_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
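Editor's note: the run of ReadOnlyZKClient "Connect 0x…" lines that follows corresponds to the test opening a batch of independent client connections, one per concurrent writer/reader it is about to start; each Connection sets up its own ZooKeeper client and RPC client, which is why the Connect/Codec pairs repeat. A hedged sketch of opening and closing several connections (illustrative; the count is a placeholder, and a shared Connection is the more common production pattern):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManyConnectionsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        List<Connection> conns = new ArrayList<>();
        try {
            for (int i = 0; i < 10; i++) {
                // Each call produces its own "Connect 0x..." / "Codec=..." pair like the ones below.
                conns.add(ConnectionFactory.createConnection(conf));
            }
            // ... hand one Connection to each worker thread here ...
        } finally {
            for (Connection c : conns) {
                c.close();
            }
        }
    }
}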
2024-11-19T12:20:22,395 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=7d5f97e10ca89010b1b0ccd9ef5577c9, regionState=OPEN, openSeqNum=5, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=158 2024-11-19T12:20:22,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=158, state=SUCCESS; OpenRegionProcedure 7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 in 166 msec 2024-11-19T12:20:22,397 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-19T12:20:22,397 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7d5f97e10ca89010b1b0ccd9ef5577c9, REOPEN/MOVE in 477 msec 2024-11-19T12:20:22,398 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-19T12:20:22,398 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 480 msec 2024-11-19T12:20:22,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 892 msec 2024-11-19T12:20:22,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-19T12:20:22,401 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31f7586d to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@314e353d 2024-11-19T12:20:22,404 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1767dc60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:20:22,405 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4949adfa to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@96e8e33 2024-11-19T12:20:22,409 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20c3d7a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:20:22,409 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53ef82c4 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e8d919c 2024-11-19T12:20:22,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10cd3d28, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:20:22,413 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0672325a 
to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44b14279 2024-11-19T12:20:22,419 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d6c03ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:20:22,420 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x205568ef to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6eb94416 2024-11-19T12:20:22,422 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3395eba8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:20:22,423 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3852b0e3 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2406c4ea 2024-11-19T12:20:22,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a5e441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:20:22,426 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4aa4b067 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58970c4d 2024-11-19T12:20:22,430 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@723a6cf2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:20:22,430 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1ca17819 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cb4faa4 2024-11-19T12:20:22,434 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d48543c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:20:22,434 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x77a6a62c to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c1c03a6 2024-11-19T12:20:22,438 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@336a5bad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:20:22,438 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x125099a6 to 127.0.0.1:64186 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e9db979 2024-11-19T12:20:22,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fcb3634, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:20:22,449 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:20:22,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-19T12:20:22,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-19T12:20:22,450 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:20:22,450 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:20:22,450 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:20:22,452 DEBUG [hconnection-0x4959bf77-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:20:22,453 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48938, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:20:22,454 DEBUG [hconnection-0xa272d52-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:20:22,455 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48948, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:20:22,458 DEBUG [hconnection-0x66b2406a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:20:22,458 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48954, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:20:22,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,462 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:20:22,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, 
store=A 2024-11-19T12:20:22,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:22,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:22,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:22,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:22,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:22,472 DEBUG [hconnection-0x65bf6f59-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:20:22,472 DEBUG [hconnection-0x53e66373-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:20:22,473 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:20:22,473 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48958, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:20:22,476 DEBUG [hconnection-0x5ea21d33-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:20:22,477 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:20:22,477 DEBUG [hconnection-0x541d34ef-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:20:22,478 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48984, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:20:22,484 DEBUG [hconnection-0x63f3ae5a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:20:22,485 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49000, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:20:22,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018882498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018882501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018882501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018882502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,506 DEBUG [hconnection-0x3fa1c719-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:20:22,507 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:20:22,510 DEBUG [hconnection-0x4706ed16-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:20:22,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119fd8de8b7296e422386a63be83efdf335_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018822460/Put/seqid=0 2024-11-19T12:20:22,512 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:20:22,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018882513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742348_1524 (size=12154) 2024-11-19T12:20:22,518 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:22,522 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119fd8de8b7296e422386a63be83efdf335_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119fd8de8b7296e422386a63be83efdf335_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:22,522 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/15d988f7ba8441b7b215f1327702e4dc, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:22,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/15d988f7ba8441b7b215f1327702e4dc is 175, key is test_row_0/A:col10/1732018822460/Put/seqid=0 2024-11-19T12:20:22,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742349_1525 (size=30955) 2024-11-19T12:20:22,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-19T12:20:22,601 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:22,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-19T12:20:22,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:22,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:22,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:22,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:22,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:22,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:22,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018882602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018882604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018882604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018882604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018882615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-19T12:20:22,754 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:22,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-19T12:20:22,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:22,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:22,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:22,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:22,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:22,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:22,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018882804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018882808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018882808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018882808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:22,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018882817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:22,906 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:22,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-19T12:20:22,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:22,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:22,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:22,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:22,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:22,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:22,928 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/15d988f7ba8441b7b215f1327702e4dc 2024-11-19T12:20:22,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/2e02a2ab878f43eda0fb5e5d95eec8d8 is 50, key is test_row_0/B:col10/1732018822460/Put/seqid=0 2024-11-19T12:20:22,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742350_1526 (size=12001) 2024-11-19T12:20:22,957 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/2e02a2ab878f43eda0fb5e5d95eec8d8 2024-11-19T12:20:22,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/7cbe0066933e4eb4a4badc1b035e6e86 is 50, key is test_row_0/C:col10/1732018822460/Put/seqid=0 2024-11-19T12:20:22,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742351_1527 (size=12001) 2024-11-19T12:20:23,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-19T12:20:23,058 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:23,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-19T12:20:23,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:23,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:23,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:23,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:23,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:23,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:23,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018883108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018883110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018883111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018883111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018883120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,211 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:23,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-19T12:20:23,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:23,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:23,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:23,211 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:23,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:23,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:23,363 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:23,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-19T12:20:23,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:23,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:23,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:23,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:23,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:23,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:23,384 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/7cbe0066933e4eb4a4badc1b035e6e86 2024-11-19T12:20:23,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/15d988f7ba8441b7b215f1327702e4dc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/15d988f7ba8441b7b215f1327702e4dc 2024-11-19T12:20:23,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/15d988f7ba8441b7b215f1327702e4dc, entries=150, sequenceid=15, filesize=30.2 K 2024-11-19T12:20:23,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/2e02a2ab878f43eda0fb5e5d95eec8d8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/2e02a2ab878f43eda0fb5e5d95eec8d8 2024-11-19T12:20:23,394 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/2e02a2ab878f43eda0fb5e5d95eec8d8, entries=150, sequenceid=15, 
filesize=11.7 K 2024-11-19T12:20:23,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/7cbe0066933e4eb4a4badc1b035e6e86 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/7cbe0066933e4eb4a4badc1b035e6e86 2024-11-19T12:20:23,397 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/7cbe0066933e4eb4a4badc1b035e6e86, entries=150, sequenceid=15, filesize=11.7 K 2024-11-19T12:20:23,398 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 936ms, sequenceid=15, compaction requested=false 2024-11-19T12:20:23,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:23,516 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:23,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-19T12:20:23,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
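The repeated "RegionTooBusyException: Over memstore limit=512.0 K" entries in this log are write backpressure rather than flush errors: HRegion.checkResources rejects new mutations once the region's memstore exceeds its blocking size (the memstore flush size multiplied by the block multiplier), and the client retries those Mutate calls once a flush has drained the memstore. A configuration sketch consistent with the 512 K limit seen here, assuming illustrative values (128 KB x 4 = 512 KB is one combination that matches; the test's actual settings are not shown in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical settings that would yield the 512 K blocking limit logged above:
    // blocking size = hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
    public final class MemstoreBackpressureSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB flush trigger (test-sized assumption)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // 4 * 128 KB = 512 KB blocking size
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("Blocking memstore size (bytes): " + blocking);
      }
    }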
2024-11-19T12:20:23,516 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-19T12:20:23,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:23,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:23,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:23,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:23,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:23,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:23,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411190af4129bebb44606b44a9d1650e3bda8_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018822500/Put/seqid=0 2024-11-19T12:20:23,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742352_1528 (size=12154) 2024-11-19T12:20:23,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:23,530 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411190af4129bebb44606b44a9d1650e3bda8_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411190af4129bebb44606b44a9d1650e3bda8_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:23,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/840d3bdb10ba4ed1a6b23bc77f56b443, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:23,531 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/840d3bdb10ba4ed1a6b23bc77f56b443 is 175, key is test_row_0/A:col10/1732018822500/Put/seqid=0 2024-11-19T12:20:23,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742353_1529 (size=30955) 2024-11-19T12:20:23,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-19T12:20:23,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:23,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:23,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018883616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018883617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018883617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018883618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018883624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018883719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018883719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018883720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018883922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018883923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:23,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018883923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:23,934 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/840d3bdb10ba4ed1a6b23bc77f56b443 2024-11-19T12:20:23,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/6409464d35b24ac7a7313c304be67616 is 50, key is test_row_0/B:col10/1732018822500/Put/seqid=0 2024-11-19T12:20:23,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742354_1530 (size=12001) 2024-11-19T12:20:24,134 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-19T12:20:24,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:24,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018884224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:24,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:24,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018884225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:24,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:24,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018884227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:24,344 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/6409464d35b24ac7a7313c304be67616 2024-11-19T12:20:24,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/8c296a18575745ee9b6a1f20dfc0c060 is 50, key is test_row_0/C:col10/1732018822500/Put/seqid=0 2024-11-19T12:20:24,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742355_1531 (size=12001) 2024-11-19T12:20:24,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-19T12:20:24,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:24,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018884623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:24,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:24,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018884633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:24,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018884727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:24,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018884729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:24,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018884731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:24,753 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/8c296a18575745ee9b6a1f20dfc0c060 2024-11-19T12:20:24,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/840d3bdb10ba4ed1a6b23bc77f56b443 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/840d3bdb10ba4ed1a6b23bc77f56b443 2024-11-19T12:20:24,760 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/840d3bdb10ba4ed1a6b23bc77f56b443, entries=150, sequenceid=42, filesize=30.2 K 2024-11-19T12:20:24,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/6409464d35b24ac7a7313c304be67616 as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/6409464d35b24ac7a7313c304be67616 2024-11-19T12:20:24,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,763 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/6409464d35b24ac7a7313c304be67616, entries=150, sequenceid=42, filesize=11.7 K 2024-11-19T12:20:24,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/8c296a18575745ee9b6a1f20dfc0c060 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/8c296a18575745ee9b6a1f20dfc0c060 2024-11-19T12:20:24,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,767 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/8c296a18575745ee9b6a1f20dfc0c060, entries=150, sequenceid=42, filesize=11.7 K 2024-11-19T12:20:24,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,768 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 
7d5f97e10ca89010b1b0ccd9ef5577c9 in 1252ms, sequenceid=42, compaction requested=false 2024-11-19T12:20:24,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:24,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:24,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-19T12:20:24,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-19T12:20:24,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,770 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-19T12:20:24,770 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3190 sec 2024-11-19T12:20:24,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,771 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.3220 sec 2024-11-19T12:20:24,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,772 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,779 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,791 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,798 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,804 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,811 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,820 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,832 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:24,841 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [... this identical DEBUG entry repeats continuously from 2024-11-19T12:20:24,842 through 2024-11-19T12:20:25,024, emitted by RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 36047; only the timestamps and handler numbers vary ...] 2024-11-19T12:20:25,025 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,033 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,042 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,050 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,056 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,062 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,072 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,079 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,086 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,098 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,108 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,118 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,128 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,140 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,312 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,320 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,327 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,333 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,343 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,349 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,356 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,364 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,374 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,380 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,391 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,403 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,413 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,419 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [identical StoreFileTrackerFactory(122) DEBUG entries repeat on handlers 0, 1 and 2 of port 36047 from 2024-11-19T12:20:25,420 through 2024-11-19T12:20:25,557] 2024-11-19T12:20:25,558 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,565 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,580 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,585 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,607 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,613 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,622 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,628 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,635 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,642 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,650 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,655 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,659 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,668 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,676 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:25,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 
3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-19T12:20:25,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:25,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:25,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:25,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:25,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:25,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:25,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119affe47dc10fb481c99d0bcb27e6f97c7_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018823616/Put/seqid=0 2024-11-19T12:20:25,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742356_1532 (size=14594) 2024-11-19T12:20:25,774 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:25,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
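The warning above, together with the RegionTooBusyException trace that follows it, records the write throttle this test keeps hitting: once the region's memstore crosses its blocking limit (512.0 K in this run; typically derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier), HRegion.checkResources rejects further Mutate calls until MemStoreFlusher.0 drains the stores, and the client's RpcRetryingCallerImpl retries in the background (see the later "tries=6, retries=16" entry). The sketch below is not part of the test output; it is a minimal, assumed illustration of how a writer along the lines of AcidGuaranteesTestTool's AtomicityWriter could be configured to ride out such rejections. The class name, cell value, retry counts and backoff are invented for the example.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Standard client retry knobs consulted by RpcRetryingCallerImpl (values are illustrative).
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same row/family/qualifier shape as the Mutate calls being rejected in the log.
      Put put = new Put(Bytes.toBytes("test_row_1"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("dummy-value"));
      for (int attempt = 1; ; attempt++) {
        try {
          // The HBase client retries busy-region rejections internally before this call returns.
          table.put(put);
          break;
        } catch (IOException e) {
          // Reached once the internal retry budget is exhausted (or on another I/O failure);
          // back off and try again instead of failing the writer thread outright.
          if (attempt >= 5) {
            throw e;
          }
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}

The outer loop only matters once the client's internal retry budget is exhausted; until then the retries stay inside the HBase client, as the RpcRetryingCallerImpl entry further down in this log shows.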
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:25,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:25,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018885772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:25,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018885773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:25,778 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119affe47dc10fb481c99d0bcb27e6f97c7_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119affe47dc10fb481c99d0bcb27e6f97c7_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:25,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:25,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018885775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:25,778 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/c1880b5f2cfd4edebdff89bf87dc0e97, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:25,779 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/c1880b5f2cfd4edebdff89bf87dc0e97 is 175, key is test_row_0/A:col10/1732018823616/Put/seqid=0 2024-11-19T12:20:25,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742357_1533 (size=39549) 2024-11-19T12:20:25,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:25,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018885876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:25,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:25,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018885876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:25,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018885879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:26,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:26,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018886081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:26,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:26,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018886081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:26,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:26,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018886081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:26,190 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/c1880b5f2cfd4edebdff89bf87dc0e97 2024-11-19T12:20:26,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/84d5bdf0523648498f759f3d0304a95c is 50, key is test_row_0/B:col10/1732018823616/Put/seqid=0 2024-11-19T12:20:26,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742358_1534 (size=12001) 2024-11-19T12:20:26,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:26,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018886384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:26,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:26,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018886384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:26,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:26,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018886385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:26,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-19T12:20:26,554 INFO [Thread-2341 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-19T12:20:26,555 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:20:26,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-19T12:20:26,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-19T12:20:26,556 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:20:26,557 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:20:26,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:20:26,600 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/84d5bdf0523648498f759f3d0304a95c 2024-11-19T12:20:26,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/1a8cc492ce1d451f932164a53cdd9190 is 50, key is test_row_0/C:col10/1732018823616/Put/seqid=0 2024-11-19T12:20:26,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742359_1535 (size=12001) 2024-11-19T12:20:26,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:26,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018886643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:26,645 DEBUG [Thread-2337 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 
'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., hostname=af314c41f984,36047,1732018661455, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:20:26,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:26,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018886645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:26,646 DEBUG [Thread-2331 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., hostname=af314c41f984,36047,1732018661455, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:20:26,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-19T12:20:26,708 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:26,709 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-19T12:20:26,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:26,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:26,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:26,709 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
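The repeated RegionTooBusyException entries above come from HRegion.checkResources, which rejects writes once a region's memstore crosses its blocking limit (the configured flush size multiplied by hbase.hregion.memstore.block.multiplier); the client-side RpcRetryingCallerImpl lines show those rejections being retried rather than surfaced to the test threads. Below is a minimal Java sketch of that arithmetic. The 128 KB flush size is an assumption chosen only so the product matches the 512 KB limit reported in this run; it is not a value read from the test's configuration.

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    // Assumed flush size for this test run (hbase.hregion.memstore.flush.size);
    // the production default is 128 MB, so 128 KB here is purely illustrative.
    long assumedFlushSizeBytes = 128L * 1024;
    // hbase.hregion.memstore.block.multiplier, commonly 4 by default.
    long blockMultiplier = 4L;
    long blockingLimitBytes = assumedFlushSizeBytes * blockMultiplier;
    // 128 KB * 4 = 512 KB, i.e. the "Over memstore limit=512.0 K" seen in the log;
    // writes to the region fail fast with RegionTooBusyException until a flush drains it.
    System.out.printf("Blocking limit = %.1f K%n", blockingLimitBytes / 1024.0);
  }
}
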
2024-11-19T12:20:26,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:26,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:26,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-19T12:20:26,861 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:26,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-19T12:20:26,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:26,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:26,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:26,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:26,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:26,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:26,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:26,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018886888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:26,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018886889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:26,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:26,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018886890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:27,013 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:27,013 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-19T12:20:27,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:27,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:27,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:27,014 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:27,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:27,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
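Each "Unable to complete flush" round trip above is the region server declining FlushRegionProcedure (pid=164) because the MemStoreFlusher is already flushing the region; the master re-dispatches the callable until an attempt succeeds. For reference, a hedged sketch of the client side that drives this flow through the standard Admin API (the table name is taken from the log; error handling is omitted):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master, which fans out one
      // FlushRegionProcedure per region and returns once the procedure completes
      // (compare "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed").
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
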
2024-11-19T12:20:27,016 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/1a8cc492ce1d451f932164a53cdd9190 2024-11-19T12:20:27,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/c1880b5f2cfd4edebdff89bf87dc0e97 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c1880b5f2cfd4edebdff89bf87dc0e97 2024-11-19T12:20:27,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c1880b5f2cfd4edebdff89bf87dc0e97, entries=200, sequenceid=53, filesize=38.6 K 2024-11-19T12:20:27,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/84d5bdf0523648498f759f3d0304a95c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/84d5bdf0523648498f759f3d0304a95c 2024-11-19T12:20:27,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/84d5bdf0523648498f759f3d0304a95c, entries=150, sequenceid=53, filesize=11.7 K 2024-11-19T12:20:27,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/1a8cc492ce1d451f932164a53cdd9190 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/1a8cc492ce1d451f932164a53cdd9190 2024-11-19T12:20:27,029 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/1a8cc492ce1d451f932164a53cdd9190, entries=150, sequenceid=53, filesize=11.7 K 2024-11-19T12:20:27,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1288ms, sequenceid=53, compaction requested=true 2024-11-19T12:20:27,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:27,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:20:27,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:27,030 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:27,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:20:27,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:27,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:20:27,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:27,030 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:27,031 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:27,031 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:27,031 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/B is initiating minor compaction (all files) 2024-11-19T12:20:27,031 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/A is initiating minor compaction (all files) 2024-11-19T12:20:27,031 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/A in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:27,031 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/B in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
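Once the flush lands, the flusher adds compact marks for stores A, B and C and the policy selects all three files of each store, as logged above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking"). A short sketch of the settings those numbers usually correspond to, using what are believed to be the common defaults rather than this cluster's actual values, plus an explicit compaction request via the Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);        // minor compaction trigger
    int blocking = conf.getInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" above
    System.out.println("Minor compaction considered at " + minFiles
        + " files; flushes stall once a store reaches " + blocking + " files.");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Request a major compaction explicitly instead of waiting for the policy to schedule one.
      admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
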
2024-11-19T12:20:27,031 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/15d988f7ba8441b7b215f1327702e4dc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/840d3bdb10ba4ed1a6b23bc77f56b443, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c1880b5f2cfd4edebdff89bf87dc0e97] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=99.1 K 2024-11-19T12:20:27,031 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:27,031 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/2e02a2ab878f43eda0fb5e5d95eec8d8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/6409464d35b24ac7a7313c304be67616, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/84d5bdf0523648498f759f3d0304a95c] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=35.2 K 2024-11-19T12:20:27,031 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/15d988f7ba8441b7b215f1327702e4dc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/840d3bdb10ba4ed1a6b23bc77f56b443, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c1880b5f2cfd4edebdff89bf87dc0e97] 2024-11-19T12:20:27,031 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e02a2ab878f43eda0fb5e5d95eec8d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732018822460 2024-11-19T12:20:27,031 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15d988f7ba8441b7b215f1327702e4dc, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732018822460 2024-11-19T12:20:27,032 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 840d3bdb10ba4ed1a6b23bc77f56b443, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732018822497 2024-11-19T12:20:27,032 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 6409464d35b24ac7a7313c304be67616, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732018822497 2024-11-19T12:20:27,032 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1880b5f2cfd4edebdff89bf87dc0e97, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018823615 2024-11-19T12:20:27,032 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 84d5bdf0523648498f759f3d0304a95c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018823616 2024-11-19T12:20:27,037 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:27,038 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#B#compaction#456 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:27,039 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/c84c715e49b648bfb5c1a4f67aab8218 is 50, key is test_row_0/B:col10/1732018823616/Put/seqid=0 2024-11-19T12:20:27,039 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241119b02f9e5c848e49ab9bbfb3ea72243f96_7d5f97e10ca89010b1b0ccd9ef5577c9 store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:27,048 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241119b02f9e5c848e49ab9bbfb3ea72243f96_7d5f97e10ca89010b1b0ccd9ef5577c9, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:27,048 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119b02f9e5c848e49ab9bbfb3ea72243f96_7d5f97e10ca89010b1b0ccd9ef5577c9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:27,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742360_1536 (size=12104) 2024-11-19T12:20:27,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742361_1537 (size=4469) 2024-11-19T12:20:27,063 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#A#compaction#457 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:27,063 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/d9791e0183c5440497c32bb5abedbe5a is 175, key is test_row_0/A:col10/1732018823616/Put/seqid=0 2024-11-19T12:20:27,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742362_1538 (size=31058) 2024-11-19T12:20:27,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-19T12:20:27,165 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:27,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-19T12:20:27,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:27,166 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-19T12:20:27,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:27,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:27,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:27,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:27,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:27,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:27,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411194a6595d887ff47b19b65a4e6162cb46a_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018825772/Put/seqid=0 2024-11-19T12:20:27,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to 
blk_1073742363_1539 (size=12154) 2024-11-19T12:20:27,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:27,186 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411194a6595d887ff47b19b65a4e6162cb46a_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411194a6595d887ff47b19b65a4e6162cb46a_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:27,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/1517baf059ec43da91e10de1a3dd0fe7, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:27,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/1517baf059ec43da91e10de1a3dd0fe7 is 175, key is test_row_0/A:col10/1732018825772/Put/seqid=0 2024-11-19T12:20:27,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742364_1540 (size=30955) 2024-11-19T12:20:27,191 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/1517baf059ec43da91e10de1a3dd0fe7 2024-11-19T12:20:27,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/d52ce390e3b94d0aa53f3a5a8356b5fe is 50, key is test_row_0/B:col10/1732018825772/Put/seqid=0 2024-11-19T12:20:27,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742365_1541 (size=12001) 2024-11-19T12:20:27,461 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/c84c715e49b648bfb5c1a4f67aab8218 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/c84c715e49b648bfb5c1a4f67aab8218 
2024-11-19T12:20:27,465 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/B of 7d5f97e10ca89010b1b0ccd9ef5577c9 into c84c715e49b648bfb5c1a4f67aab8218(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:27,465 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:27,465 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/B, priority=13, startTime=1732018827030; duration=0sec 2024-11-19T12:20:27,465 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:27,465 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:B 2024-11-19T12:20:27,465 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:27,466 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:27,466 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/C is initiating minor compaction (all files) 2024-11-19T12:20:27,466 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/C in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:27,466 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/7cbe0066933e4eb4a4badc1b035e6e86, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/8c296a18575745ee9b6a1f20dfc0c060, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/1a8cc492ce1d451f932164a53cdd9190] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=35.2 K 2024-11-19T12:20:27,466 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cbe0066933e4eb4a4badc1b035e6e86, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732018822460 2024-11-19T12:20:27,467 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c296a18575745ee9b6a1f20dfc0c060, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732018822497 2024-11-19T12:20:27,467 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a8cc492ce1d451f932164a53cdd9190, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018823616 2024-11-19T12:20:27,470 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/d9791e0183c5440497c32bb5abedbe5a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/d9791e0183c5440497c32bb5abedbe5a 2024-11-19T12:20:27,479 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#C#compaction#460 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:27,479 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/A of 7d5f97e10ca89010b1b0ccd9ef5577c9 into d9791e0183c5440497c32bb5abedbe5a(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:27,479 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:27,479 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/A, priority=13, startTime=1732018827030; duration=0sec 2024-11-19T12:20:27,479 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:27,479 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:A 2024-11-19T12:20:27,479 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/afc431727c3f4b1db5d618c7f467a8d3 is 50, key is test_row_0/C:col10/1732018823616/Put/seqid=0 2024-11-19T12:20:27,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742366_1542 (size=12104) 2024-11-19T12:20:27,600 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/d52ce390e3b94d0aa53f3a5a8356b5fe 2024-11-19T12:20:27,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/935363dd361f461782a2a1f101d0e450 is 50, key is test_row_0/C:col10/1732018825772/Put/seqid=0 2024-11-19T12:20:27,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742367_1543 (size=12001) 2024-11-19T12:20:27,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-19T12:20:27,887 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/afc431727c3f4b1db5d618c7f467a8d3 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/afc431727c3f4b1db5d618c7f467a8d3 2024-11-19T12:20:27,891 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/C of 7d5f97e10ca89010b1b0ccd9ef5577c9 into afc431727c3f4b1db5d618c7f467a8d3(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:27,891 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:27,891 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/C, priority=13, startTime=1732018827030; duration=0sec 2024-11-19T12:20:27,891 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:27,891 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:C 2024-11-19T12:20:27,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:27,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:27,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018887927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:27,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:27,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018887927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:27,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:27,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018887927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,009 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/935363dd361f461782a2a1f101d0e450 2024-11-19T12:20:28,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/1517baf059ec43da91e10de1a3dd0fe7 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1517baf059ec43da91e10de1a3dd0fe7 2024-11-19T12:20:28,016 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1517baf059ec43da91e10de1a3dd0fe7, entries=150, sequenceid=78, filesize=30.2 K 2024-11-19T12:20:28,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/d52ce390e3b94d0aa53f3a5a8356b5fe as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/d52ce390e3b94d0aa53f3a5a8356b5fe 2024-11-19T12:20:28,020 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/d52ce390e3b94d0aa53f3a5a8356b5fe, entries=150, sequenceid=78, filesize=11.7 K 2024-11-19T12:20:28,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/935363dd361f461782a2a1f101d0e450 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/935363dd361f461782a2a1f101d0e450 2024-11-19T12:20:28,023 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/935363dd361f461782a2a1f101d0e450, entries=150, sequenceid=78, filesize=11.7 K 2024-11-19T12:20:28,024 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 857ms, sequenceid=78, compaction requested=false 2024-11-19T12:20:28,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:28,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:28,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-19T12:20:28,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-19T12:20:28,026 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-19T12:20:28,026 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4680 sec 2024-11-19T12:20:28,027 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.4710 sec 2024-11-19T12:20:28,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:28,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-19T12:20:28,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:28,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:28,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:28,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:28,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:28,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:28,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411199435f6278a0b47b5b561a7749c7003e4_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018828030/Put/seqid=0 2024-11-19T12:20:28,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742368_1544 (size=12154) 2024-11-19T12:20:28,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018888051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018888052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018888052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018888154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018888155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018888155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018888356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018888357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018888358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,443 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:28,446 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411199435f6278a0b47b5b561a7749c7003e4_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411199435f6278a0b47b5b561a7749c7003e4_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:28,447 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/c2d68fe0fcb5490e803f0aeb2295b322, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:28,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/c2d68fe0fcb5490e803f0aeb2295b322 is 175, key is test_row_0/A:col10/1732018828030/Put/seqid=0 2024-11-19T12:20:28,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742369_1545 (size=30955) 2024-11-19T12:20:28,458 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/c2d68fe0fcb5490e803f0aeb2295b322 2024-11-19T12:20:28,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/112c5311b2044768be3ed4dd5714da46 is 50, key is test_row_0/B:col10/1732018828030/Put/seqid=0 2024-11-19T12:20:28,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742370_1546 (size=12001) 2024-11-19T12:20:28,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-19T12:20:28,660 INFO [Thread-2341 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-19T12:20:28,661 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:20:28,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-19T12:20:28,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018888660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018888661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-19T12:20:28,662 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:20:28,663 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:20:28,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:28,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018888662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:28,663 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:20:28,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-19T12:20:28,814 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:28,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-19T12:20:28,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:28,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:28,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:28,815 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:28,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:28,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:28,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/112c5311b2044768be3ed4dd5714da46 2024-11-19T12:20:28,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/e7d61ad42bc843398c5a102e44b0c7ee is 50, key is test_row_0/C:col10/1732018828030/Put/seqid=0 2024-11-19T12:20:28,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742371_1547 (size=12001) 2024-11-19T12:20:28,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-19T12:20:28,966 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:28,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-19T12:20:28,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:28,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:28,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:28,967 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:28,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:28,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:29,119 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:29,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-19T12:20:29,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:29,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:29,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:29,119 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:29,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:29,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:29,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:29,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018889164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:29,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:29,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018889166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:29,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:29,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018889167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:29,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-19T12:20:29,271 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:29,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-19T12:20:29,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:29,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:29,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:29,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:29,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:29,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:29,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/e7d61ad42bc843398c5a102e44b0c7ee 2024-11-19T12:20:29,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/c2d68fe0fcb5490e803f0aeb2295b322 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c2d68fe0fcb5490e803f0aeb2295b322 2024-11-19T12:20:29,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c2d68fe0fcb5490e803f0aeb2295b322, entries=150, sequenceid=95, filesize=30.2 K 2024-11-19T12:20:29,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/112c5311b2044768be3ed4dd5714da46 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/112c5311b2044768be3ed4dd5714da46 2024-11-19T12:20:29,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/112c5311b2044768be3ed4dd5714da46, entries=150, sequenceid=95, filesize=11.7 K 2024-11-19T12:20:29,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/e7d61ad42bc843398c5a102e44b0c7ee as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/e7d61ad42bc843398c5a102e44b0c7ee 2024-11-19T12:20:29,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/e7d61ad42bc843398c5a102e44b0c7ee, entries=150, sequenceid=95, filesize=11.7 K 2024-11-19T12:20:29,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush 
of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1265ms, sequenceid=95, compaction requested=true 2024-11-19T12:20:29,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:29,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:20:29,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:29,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:20:29,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:29,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:20:29,296 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:29,296 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:29,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:29,297 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:29,297 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:29,297 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/A is initiating minor compaction (all files) 2024-11-19T12:20:29,297 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/B is initiating minor compaction (all files) 2024-11-19T12:20:29,297 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/B in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:29,297 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/A in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:29,297 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/d9791e0183c5440497c32bb5abedbe5a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1517baf059ec43da91e10de1a3dd0fe7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c2d68fe0fcb5490e803f0aeb2295b322] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=90.8 K 2024-11-19T12:20:29,297 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/c84c715e49b648bfb5c1a4f67aab8218, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/d52ce390e3b94d0aa53f3a5a8356b5fe, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/112c5311b2044768be3ed4dd5714da46] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=35.3 K 2024-11-19T12:20:29,298 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:29,298 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/d9791e0183c5440497c32bb5abedbe5a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1517baf059ec43da91e10de1a3dd0fe7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c2d68fe0fcb5490e803f0aeb2295b322] 2024-11-19T12:20:29,298 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c84c715e49b648bfb5c1a4f67aab8218, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018823616 2024-11-19T12:20:29,298 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9791e0183c5440497c32bb5abedbe5a, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018823616 2024-11-19T12:20:29,298 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1517baf059ec43da91e10de1a3dd0fe7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732018825766 2024-11-19T12:20:29,298 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting d52ce390e3b94d0aa53f3a5a8356b5fe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732018825766 2024-11-19T12:20:29,298 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 112c5311b2044768be3ed4dd5714da46, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732018827902 2024-11-19T12:20:29,298 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2d68fe0fcb5490e803f0aeb2295b322, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732018827902 2024-11-19T12:20:29,303 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:29,304 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#B#compaction#466 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:29,304 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411192ade98bd09b9407da0bac84d87e892f5_7d5f97e10ca89010b1b0ccd9ef5577c9 store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:29,304 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/ba2b685d8f574e6a89dde0d2fbdd4ebc is 50, key is test_row_0/B:col10/1732018828030/Put/seqid=0 2024-11-19T12:20:29,306 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411192ade98bd09b9407da0bac84d87e892f5_7d5f97e10ca89010b1b0ccd9ef5577c9, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:29,306 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411192ade98bd09b9407da0bac84d87e892f5_7d5f97e10ca89010b1b0ccd9ef5577c9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:29,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742372_1548 (size=12207) 2024-11-19T12:20:29,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742373_1549 (size=4469) 2024-11-19T12:20:29,424 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:29,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-19T12:20:29,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:29,424 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-19T12:20:29,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:29,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:29,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:29,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:29,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:29,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:29,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119f607e4dc044c4568b9487458dd3b6705_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018828051/Put/seqid=0 2024-11-19T12:20:29,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742374_1550 (size=12154) 2024-11-19T12:20:29,711 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#A#compaction#465 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:29,711 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/da676efe52dc42b68deb2c50cdfd89b4 is 175, key is test_row_0/A:col10/1732018828030/Put/seqid=0 2024-11-19T12:20:29,712 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/ba2b685d8f574e6a89dde0d2fbdd4ebc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/ba2b685d8f574e6a89dde0d2fbdd4ebc 2024-11-19T12:20:29,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742375_1551 (size=31161) 2024-11-19T12:20:29,716 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/B of 7d5f97e10ca89010b1b0ccd9ef5577c9 into ba2b685d8f574e6a89dde0d2fbdd4ebc(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:29,716 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:29,716 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/B, priority=13, startTime=1732018829296; duration=0sec 2024-11-19T12:20:29,716 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:29,716 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:B 2024-11-19T12:20:29,717 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:29,717 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:29,717 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/C is initiating minor compaction (all files) 2024-11-19T12:20:29,718 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/C in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:29,718 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/afc431727c3f4b1db5d618c7f467a8d3, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/935363dd361f461782a2a1f101d0e450, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/e7d61ad42bc843398c5a102e44b0c7ee] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=35.3 K 2024-11-19T12:20:29,718 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting afc431727c3f4b1db5d618c7f467a8d3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732018823616 2024-11-19T12:20:29,718 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 935363dd361f461782a2a1f101d0e450, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732018825766 2024-11-19T12:20:29,719 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting e7d61ad42bc843398c5a102e44b0c7ee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732018827902 2024-11-19T12:20:29,719 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/da676efe52dc42b68deb2c50cdfd89b4 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/da676efe52dc42b68deb2c50cdfd89b4 2024-11-19T12:20:29,723 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/A of 7d5f97e10ca89010b1b0ccd9ef5577c9 into da676efe52dc42b68deb2c50cdfd89b4(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:29,723 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:29,723 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/A, priority=13, startTime=1732018829296; duration=0sec 2024-11-19T12:20:29,723 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:29,723 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:A 2024-11-19T12:20:29,724 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#C#compaction#468 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:29,725 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/ed234f5fa2f64733bf318e13a822de7e is 50, key is test_row_0/C:col10/1732018828030/Put/seqid=0 2024-11-19T12:20:29,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742376_1552 (size=12207) 2024-11-19T12:20:29,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-19T12:20:29,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:29,843 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119f607e4dc044c4568b9487458dd3b6705_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119f607e4dc044c4568b9487458dd3b6705_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:29,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/0037e7fcc2c044ff81a3fd4f0737fef7, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:29,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/0037e7fcc2c044ff81a3fd4f0737fef7 is 175, key is test_row_0/A:col10/1732018828051/Put/seqid=0 2024-11-19T12:20:29,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742377_1553 (size=30955) 2024-11-19T12:20:30,133 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/ed234f5fa2f64733bf318e13a822de7e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/ed234f5fa2f64733bf318e13a822de7e 2024-11-19T12:20:30,136 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/C of 7d5f97e10ca89010b1b0ccd9ef5577c9 into ed234f5fa2f64733bf318e13a822de7e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:30,136 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:30,137 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/C, priority=13, startTime=1732018829296; duration=0sec 2024-11-19T12:20:30,137 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:30,137 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:C 2024-11-19T12:20:30,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:30,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:30,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018890178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018890178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018890178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,248 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/0037e7fcc2c044ff81a3fd4f0737fef7 2024-11-19T12:20:30,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/e78f01546a0c4312b686b378be714b12 is 50, key is test_row_0/B:col10/1732018828051/Put/seqid=0 2024-11-19T12:20:30,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742378_1554 (size=12001) 2024-11-19T12:20:30,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018890281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018890281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018890281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018890483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018890483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018890484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,658 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/e78f01546a0c4312b686b378be714b12 2024-11-19T12:20:30,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/6adb71c1cf6d470fbd7f092cd44578d3 is 50, key is test_row_0/C:col10/1732018828051/Put/seqid=0 2024-11-19T12:20:30,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742379_1555 (size=12001) 2024-11-19T12:20:30,668 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/6adb71c1cf6d470fbd7f092cd44578d3 2024-11-19T12:20:30,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/0037e7fcc2c044ff81a3fd4f0737fef7 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/0037e7fcc2c044ff81a3fd4f0737fef7 2024-11-19T12:20:30,674 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/0037e7fcc2c044ff81a3fd4f0737fef7, entries=150, sequenceid=117, filesize=30.2 K 2024-11-19T12:20:30,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/e78f01546a0c4312b686b378be714b12 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/e78f01546a0c4312b686b378be714b12 2024-11-19T12:20:30,677 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/e78f01546a0c4312b686b378be714b12, entries=150, sequenceid=117, filesize=11.7 K 2024-11-19T12:20:30,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/6adb71c1cf6d470fbd7f092cd44578d3 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/6adb71c1cf6d470fbd7f092cd44578d3 2024-11-19T12:20:30,681 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/6adb71c1cf6d470fbd7f092cd44578d3, entries=150, sequenceid=117, filesize=11.7 K 2024-11-19T12:20:30,681 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1257ms, sequenceid=117, compaction requested=false 2024-11-19T12:20:30,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:30,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
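At this point the flush driven by FlushTableProcedure pid=165 / FlushRegionProcedure pid=166 has completed: one new HFile per store (A, B, C) has been committed and roughly 127 KB of memstore data released at sequenceid=117. The "Operation: FLUSH ... procId: 165 completed" entry further below shows the client-side HBaseAdmin waiting on that procedure. A flush like this can be requested from a client roughly as in the sketch below; the connection configuration is assumed and not taken from this log.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create(); // cluster settings assumed, not shown in the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush the table; in this HBase version the admin call
          // waits for the flush procedure to finish, as the log entries above report.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }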
2024-11-19T12:20:30,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-19T12:20:30,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-19T12:20:30,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:30,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-19T12:20:30,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:30,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:30,684 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-19T12:20:30,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:30,684 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0190 sec 2024-11-19T12:20:30,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:30,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:30,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:30,685 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.0230 sec 2024-11-19T12:20:30,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119918d38d792a649459ffc171811b00e02_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018830177/Put/seqid=0 2024-11-19T12:20:30,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742380_1556 (size=12304) 2024-11-19T12:20:30,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018890710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018890710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-19T12:20:30,766 INFO [Thread-2341 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-19T12:20:30,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:20:30,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-19T12:20:30,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-19T12:20:30,768 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:20:30,769 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:20:30,769 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:20:30,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018890787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018890787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018890788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018890814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:30,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018890814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:30,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-19T12:20:30,920 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:30,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-19T12:20:30,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:30,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:30,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:30,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:30,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:30,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:31,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018891016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:31,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:31,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018891016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:31,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-19T12:20:31,072 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:31,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-19T12:20:31,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:31,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,073 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
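The repeated RegionTooBusyException entries mean the region's memstore has reached its blocking limit (512.0 K in this run) while flushes are still in flight, so incoming mutations are rejected until a flush frees space. The blocking limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the small 512 K figure indicates the test shrinks these settings, though the exact values are not shown in this log. The sketch below is one hedged way an application-level writer could back off and retry when such rejections surface; the row, family, qualifier, and retry parameters are illustrative, and in practice the HBase client already retries RegionTooBusyException internally before the call fails.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // connection settings assumed
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              // The write may fail with an IOException once the client's own retries are
              // exhausted; a RegionTooBusyException may be the cause or arrive wrapped.
              table.put(put);
              break;
            } catch (IOException e) {
              if (attempt >= 10) {
                throw e;               // give up after a bounded number of attempts
              }
              Thread.sleep(backoffMs); // back off while the region flushes and frees memstore
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }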
2024-11-19T12:20:31,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,096 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:31,100 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119918d38d792a649459ffc171811b00e02_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119918d38d792a649459ffc171811b00e02_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:31,100 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/f397c298ed8544e5905bca53802ad3ee, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:31,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/f397c298ed8544e5905bca53802ad3ee is 175, key is test_row_0/A:col10/1732018830177/Put/seqid=0 2024-11-19T12:20:31,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742381_1557 (size=31105) 2024-11-19T12:20:31,225 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:31,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-19T12:20:31,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:31,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:31,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018891290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:31,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:31,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018891291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:31,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:31,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018891291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:31,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:31,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018891318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:31,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:31,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018891318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:31,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-19T12:20:31,378 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:31,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-19T12:20:31,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:31,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,378 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,505 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=136, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/f397c298ed8544e5905bca53802ad3ee 2024-11-19T12:20:31,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/0bfaae182c474941b6aa650c3acdbf82 is 50, key is test_row_0/B:col10/1732018830177/Put/seqid=0 2024-11-19T12:20:31,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742382_1558 (size=12151) 2024-11-19T12:20:31,530 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:31,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-19T12:20:31,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
as already flushing 2024-11-19T12:20:31,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,682 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:31,682 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-19T12:20:31,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:31,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,682 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:31,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018891823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:31,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:31,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018891824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:31,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:31,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-19T12:20:31,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:31,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:31,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:31,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:31,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:31,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-19T12:20:31,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/0bfaae182c474941b6aa650c3acdbf82 2024-11-19T12:20:31,921 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/06f2416797344b93b1c5cddf846a8e04 is 50, key is test_row_0/C:col10/1732018830177/Put/seqid=0 2024-11-19T12:20:31,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742383_1559 (size=12151) 2024-11-19T12:20:31,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/06f2416797344b93b1c5cddf846a8e04 2024-11-19T12:20:31,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/f397c298ed8544e5905bca53802ad3ee as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/f397c298ed8544e5905bca53802ad3ee 2024-11-19T12:20:31,930 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/f397c298ed8544e5905bca53802ad3ee, entries=150, sequenceid=136, filesize=30.4 K 2024-11-19T12:20:31,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/0bfaae182c474941b6aa650c3acdbf82 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/0bfaae182c474941b6aa650c3acdbf82 2024-11-19T12:20:31,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/0bfaae182c474941b6aa650c3acdbf82, entries=150, sequenceid=136, filesize=11.9 K 2024-11-19T12:20:31,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/06f2416797344b93b1c5cddf846a8e04 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/06f2416797344b93b1c5cddf846a8e04 2024-11-19T12:20:31,938 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/06f2416797344b93b1c5cddf846a8e04, entries=150, sequenceid=136, filesize=11.9 K 2024-11-19T12:20:31,939 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1256ms, sequenceid=136, compaction requested=true 2024-11-19T12:20:31,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:31,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:20:31,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:31,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:20:31,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:31,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:20:31,939 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:31,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:31,939 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:31,940 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93221 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:31,940 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/A is initiating minor compaction (all files) 2024-11-19T12:20:31,940 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/A in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:31,940 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/da676efe52dc42b68deb2c50cdfd89b4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/0037e7fcc2c044ff81a3fd4f0737fef7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/f397c298ed8544e5905bca53802ad3ee] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=91.0 K 2024-11-19T12:20:31,940 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:31,940 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/da676efe52dc42b68deb2c50cdfd89b4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/0037e7fcc2c044ff81a3fd4f0737fef7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/f397c298ed8544e5905bca53802ad3ee] 2024-11-19T12:20:31,940 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:31,940 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting da676efe52dc42b68deb2c50cdfd89b4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732018827902 2024-11-19T12:20:31,940 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/B is initiating minor compaction (all files) 2024-11-19T12:20:31,940 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/B in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:31,940 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/ba2b685d8f574e6a89dde0d2fbdd4ebc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/e78f01546a0c4312b686b378be714b12, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/0bfaae182c474941b6aa650c3acdbf82] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=35.5 K 2024-11-19T12:20:31,941 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0037e7fcc2c044ff81a3fd4f0737fef7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732018828050 2024-11-19T12:20:31,941 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting ba2b685d8f574e6a89dde0d2fbdd4ebc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732018827902 2024-11-19T12:20:31,941 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting e78f01546a0c4312b686b378be714b12, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732018828050 2024-11-19T12:20:31,941 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting f397c298ed8544e5905bca53802ad3ee, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732018830177 2024-11-19T12:20:31,941 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bfaae182c474941b6aa650c3acdbf82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732018830177 2024-11-19T12:20:31,948 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:31,950 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241119b3180c43ee28417daaf267ede118fd6d_7d5f97e10ca89010b1b0ccd9ef5577c9 store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:31,951 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#B#compaction#475 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:31,951 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/4746dc5862724be4ad4d2ee6c6c1c629 is 50, key is test_row_0/B:col10/1732018830177/Put/seqid=0 2024-11-19T12:20:31,952 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241119b3180c43ee28417daaf267ede118fd6d_7d5f97e10ca89010b1b0ccd9ef5577c9, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:31,952 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119b3180c43ee28417daaf267ede118fd6d_7d5f97e10ca89010b1b0ccd9ef5577c9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:31,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742385_1561 (size=4469) 2024-11-19T12:20:31,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742384_1560 (size=12459) 2024-11-19T12:20:31,957 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#A#compaction#474 average throughput is 2.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:31,958 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/7c3b3b82badb413183acc6df87a8c694 is 175, key is test_row_0/A:col10/1732018830177/Put/seqid=0 2024-11-19T12:20:31,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742386_1562 (size=31413) 2024-11-19T12:20:31,986 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:31,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-19T12:20:31,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:31,987 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-19T12:20:31,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:31,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:31,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:31,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:31,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:31,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:31,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111954bd0c8fc6d14eaf8cfa1fa1ced2d347_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018830709/Put/seqid=0 2024-11-19T12:20:31,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742387_1563 (size=12304) 2024-11-19T12:20:31,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:31,999 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111954bd0c8fc6d14eaf8cfa1fa1ced2d347_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111954bd0c8fc6d14eaf8cfa1fa1ced2d347_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:31,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/90eebebb8291489abd2427d8c8338f9c, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:32,000 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/90eebebb8291489abd2427d8c8338f9c is 175, key is test_row_0/A:col10/1732018830709/Put/seqid=0 2024-11-19T12:20:32,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742388_1564 (size=31105) 2024-11-19T12:20:32,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:32,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:32,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018892306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018892308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018892308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,360 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/4746dc5862724be4ad4d2ee6c6c1c629 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/4746dc5862724be4ad4d2ee6c6c1c629 
2024-11-19T12:20:32,364 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/B of 7d5f97e10ca89010b1b0ccd9ef5577c9 into 4746dc5862724be4ad4d2ee6c6c1c629(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:32,364 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:32,364 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/B, priority=13, startTime=1732018831939; duration=0sec 2024-11-19T12:20:32,364 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:32,364 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:B 2024-11-19T12:20:32,364 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:32,365 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:32,365 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/C is initiating minor compaction (all files) 2024-11-19T12:20:32,365 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/C in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:32,365 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/ed234f5fa2f64733bf318e13a822de7e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/6adb71c1cf6d470fbd7f092cd44578d3, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/06f2416797344b93b1c5cddf846a8e04] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=35.5 K 2024-11-19T12:20:32,366 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting ed234f5fa2f64733bf318e13a822de7e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732018827902 2024-11-19T12:20:32,366 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 6adb71c1cf6d470fbd7f092cd44578d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732018828050 2024-11-19T12:20:32,366 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 06f2416797344b93b1c5cddf846a8e04, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732018830177 2024-11-19T12:20:32,372 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#C#compaction#477 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:32,373 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/b69a70161abe4a09af2561d3393ab601 is 50, key is test_row_0/C:col10/1732018830177/Put/seqid=0 2024-11-19T12:20:32,375 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/7c3b3b82badb413183acc6df87a8c694 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/7c3b3b82badb413183acc6df87a8c694 2024-11-19T12:20:32,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742389_1565 (size=12459) 2024-11-19T12:20:32,378 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/A of 7d5f97e10ca89010b1b0ccd9ef5577c9 into 7c3b3b82badb413183acc6df87a8c694(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:32,378 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:32,378 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/A, priority=13, startTime=1732018831939; duration=0sec 2024-11-19T12:20:32,378 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:32,379 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:A 2024-11-19T12:20:32,403 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/90eebebb8291489abd2427d8c8338f9c 2024-11-19T12:20:32,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/1410d94891f346dd8f6a736cbbfcd528 is 50, key is test_row_0/B:col10/1732018830709/Put/seqid=0 2024-11-19T12:20:32,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018892409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742390_1566 (size=12151) 2024-11-19T12:20:32,412 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/1410d94891f346dd8f6a736cbbfcd528 2024-11-19T12:20:32,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018892411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018892411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/9af1609ed3424b7a84d56952d0c767fe is 50, key is test_row_0/C:col10/1732018830709/Put/seqid=0 2024-11-19T12:20:32,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742391_1567 (size=12151) 2024-11-19T12:20:32,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018892611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018892613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018892614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,780 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/b69a70161abe4a09af2561d3393ab601 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/b69a70161abe4a09af2561d3393ab601 2024-11-19T12:20:32,784 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/C of 7d5f97e10ca89010b1b0ccd9ef5577c9 into b69a70161abe4a09af2561d3393ab601(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:32,784 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:32,784 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/C, priority=13, startTime=1732018831939; duration=0sec 2024-11-19T12:20:32,784 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:32,784 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:C 2024-11-19T12:20:32,821 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/9af1609ed3424b7a84d56952d0c767fe 2024-11-19T12:20:32,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/90eebebb8291489abd2427d8c8338f9c as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/90eebebb8291489abd2427d8c8338f9c 2024-11-19T12:20:32,827 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/90eebebb8291489abd2427d8c8338f9c, entries=150, sequenceid=157, filesize=30.4 K 2024-11-19T12:20:32,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/1410d94891f346dd8f6a736cbbfcd528 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1410d94891f346dd8f6a736cbbfcd528 2024-11-19T12:20:32,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018892829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018892829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,831 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1410d94891f346dd8f6a736cbbfcd528, entries=150, sequenceid=157, filesize=11.9 K 2024-11-19T12:20:32,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/9af1609ed3424b7a84d56952d0c767fe as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/9af1609ed3424b7a84d56952d0c767fe 2024-11-19T12:20:32,834 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/9af1609ed3424b7a84d56952d0c767fe, entries=150, sequenceid=157, filesize=11.9 K 2024-11-19T12:20:32,835 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 848ms, sequenceid=157, compaction requested=false 2024-11-19T12:20:32,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:32,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:32,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-19T12:20:32,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-19T12:20:32,837 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-19T12:20:32,837 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0670 sec 2024-11-19T12:20:32,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 2.0710 sec 2024-11-19T12:20:32,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-19T12:20:32,871 INFO [Thread-2341 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-19T12:20:32,873 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:20:32,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-19T12:20:32,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-19T12:20:32,874 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:20:32,874 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:20:32,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:20:32,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:32,914 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-19T12:20:32,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:32,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:32,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:32,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-19T12:20:32,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:32,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:32,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111935fd9c10cba84e958cc58a6a9f7ee3bc_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018832913/Put/seqid=0 2024-11-19T12:20:32,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742392_1568 (size=14794) 2024-11-19T12:20:32,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018892932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018892934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:32,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018892935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:32,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-19T12:20:33,026 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:33,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-19T12:20:33,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:33,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,026 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:33,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:33,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018893036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:33,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:33,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018893037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:33,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:33,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018893038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:33,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-19T12:20:33,178 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:33,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-19T12:20:33,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:33,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
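The repeated pid=170 failures follow a simple pattern: the master keeps re-dispatching the FlushRegionCallable while the region is still in the middle of an earlier flush, the region server declines with "NOT flushing ... as already flushing", and the callable surfaces that as "Unable to complete flush", which the master records and retries later. A rough sketch of that decline-and-report behavior, illustrative only and not the actual FlushRegionCallable logic:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    /** Illustrative only: a region that refuses a second concurrent flush request. */
    class FlushRequestSketch {
        private final AtomicBoolean flushing = new AtomicBoolean(false);

        /** Returns normally only if this call actually started a flush. */
        void requestFlush(String regionName) throws IOException {
            if (!flushing.compareAndSet(false, true)) {
                // Matches the log: "NOT flushing <region> as already flushing" followed by
                // "Unable to complete flush" reported back to the master, which keeps
                // re-dispatching the procedure until the in-flight flush finishes.
                throw new IOException("Unable to complete flush " + regionName);
            }
            try {
                // ... write the memstore snapshot out to store files ...
            } finally {
                flushing.set(false);
            }
        }
    }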
2024-11-19T12:20:33,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:33,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018893238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:33,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:33,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018893241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:33,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:33,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018893241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:33,323 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:33,326 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111935fd9c10cba84e958cc58a6a9f7ee3bc_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111935fd9c10cba84e958cc58a6a9f7ee3bc_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:33,327 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/8155c8bd852d4163850162f403f7cccd, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:33,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/8155c8bd852d4163850162f403f7cccd is 175, key is test_row_0/A:col10/1732018832913/Put/seqid=0 2024-11-19T12:20:33,330 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:33,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-19T12:20:33,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
as already flushing 2024-11-19T12:20:33,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742393_1569 (size=39749) 2024-11-19T12:20:33,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:33,332 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/8155c8bd852d4163850162f403f7cccd 2024-11-19T12:20:33,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/5795845dee8a402eaaf045d17e8c793a is 50, key is test_row_0/B:col10/1732018832913/Put/seqid=0 2024-11-19T12:20:33,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742394_1570 (size=12151) 2024-11-19T12:20:33,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-19T12:20:33,483 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:33,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-19T12:20:33,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:33,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,484 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
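The MemStoreFlusher records above show the usual two-step commit for flushed files: each store file (and the mob file) is first written under the region's .tmp directory and only then renamed into its family directory, so readers never observe a partially written HFile. A small sketch of that write-then-rename step against the Hadoop FileSystem API; the method and its parameters are illustrative, not HBase's own flush code:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class TmpThenRename {
        /** Moves a fully written tmp file into its column-family directory in one rename. */
        public static void commitFlushedFile(FileSystem fs, Path tmpFile, Path familyDir)
                throws IOException {
            Path dst = new Path(familyDir, tmpFile.getName());
            // The flusher has already written the complete HFile under .tmp;
            // only the rename makes it visible under data/default/<table>/<region>/<family>.
            if (!fs.rename(tmpFile, dst)) {
                throw new IOException("Failed to rename " + tmpFile + " to " + dst);
            }
        }
    }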
2024-11-19T12:20:33,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:33,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018893542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:33,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:33,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018893544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:33,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:33,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018893545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:33,636 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:33,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-19T12:20:33,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
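From the client's side, each rejected Mutate call above surfaces as a RegionTooBusyException, which is retriable; the stock HBase client already retries it internally with backoff (and with default settings would eventually give up with a retries-exhausted error), but a hand-rolled loop would look roughly like the sketch below. Table name, family, and row are taken from the test; the retry cap and backoff values are arbitrary assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);              // may be rejected while the memstore is over its limit
                        break;
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) throw e;   // arbitrary cap for the sketch
                        Thread.sleep(backoffMs);     // simple exponential backoff
                        backoffMs *= 2;
                    }
                }
            }
        }
    }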
2024-11-19T12:20:33,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:33,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:33,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:33,742 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/5795845dee8a402eaaf045d17e8c793a 2024-11-19T12:20:33,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/567d79a8ba7f4ef7939a47283886c6cb is 50, key is test_row_0/C:col10/1732018832913/Put/seqid=0 2024-11-19T12:20:33,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742395_1571 (size=12151) 2024-11-19T12:20:33,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:33,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-19T12:20:33,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:33,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:33,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,941 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:33,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-19T12:20:33,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:33,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:33,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:33,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-19T12:20:34,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:34,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018894045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:34,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:34,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018894048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:34,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:34,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018894050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:34,094 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:34,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-19T12:20:34,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:34,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:34,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:34,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:34,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:34,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:34,152 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/567d79a8ba7f4ef7939a47283886c6cb 2024-11-19T12:20:34,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/8155c8bd852d4163850162f403f7cccd as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/8155c8bd852d4163850162f403f7cccd 2024-11-19T12:20:34,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/8155c8bd852d4163850162f403f7cccd, entries=200, sequenceid=176, filesize=38.8 K 2024-11-19T12:20:34,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/5795845dee8a402eaaf045d17e8c793a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/5795845dee8a402eaaf045d17e8c793a 2024-11-19T12:20:34,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/5795845dee8a402eaaf045d17e8c793a, entries=150, 
sequenceid=176, filesize=11.9 K 2024-11-19T12:20:34,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/567d79a8ba7f4ef7939a47283886c6cb as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/567d79a8ba7f4ef7939a47283886c6cb 2024-11-19T12:20:34,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/567d79a8ba7f4ef7939a47283886c6cb, entries=150, sequenceid=176, filesize=11.9 K 2024-11-19T12:20:34,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1252ms, sequenceid=176, compaction requested=true 2024-11-19T12:20:34,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:34,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:20:34,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:34,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:20:34,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:34,166 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:34,166 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:34,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:20:34,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:34,167 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:34,167 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:34,167 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] 
regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/B is initiating minor compaction (all files) 2024-11-19T12:20:34,167 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/A is initiating minor compaction (all files) 2024-11-19T12:20:34,167 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/B in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:34,167 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/A in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:34,167 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/4746dc5862724be4ad4d2ee6c6c1c629, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1410d94891f346dd8f6a736cbbfcd528, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/5795845dee8a402eaaf045d17e8c793a] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=35.9 K 2024-11-19T12:20:34,167 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/7c3b3b82badb413183acc6df87a8c694, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/90eebebb8291489abd2427d8c8338f9c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/8155c8bd852d4163850162f403f7cccd] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=99.9 K 2024-11-19T12:20:34,167 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:34,167 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/7c3b3b82badb413183acc6df87a8c694, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/90eebebb8291489abd2427d8c8338f9c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/8155c8bd852d4163850162f403f7cccd] 2024-11-19T12:20:34,168 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 4746dc5862724be4ad4d2ee6c6c1c629, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732018830177 2024-11-19T12:20:34,168 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c3b3b82badb413183acc6df87a8c694, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732018830177 2024-11-19T12:20:34,168 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90eebebb8291489abd2427d8c8338f9c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732018830688 2024-11-19T12:20:34,168 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 1410d94891f346dd8f6a736cbbfcd528, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732018830688 2024-11-19T12:20:34,168 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8155c8bd852d4163850162f403f7cccd, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732018832305 2024-11-19T12:20:34,168 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 5795845dee8a402eaaf045d17e8c793a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732018832305 2024-11-19T12:20:34,173 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:34,174 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#B#compaction#484 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:34,174 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/eb6f99aecc2549d5aa719be6daf3c92f is 50, key is test_row_0/B:col10/1732018832913/Put/seqid=0 2024-11-19T12:20:34,176 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411196cdd3e8af13d4b21a7871a616209b797_7d5f97e10ca89010b1b0ccd9ef5577c9 store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:34,177 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411196cdd3e8af13d4b21a7871a616209b797_7d5f97e10ca89010b1b0ccd9ef5577c9, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:34,177 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196cdd3e8af13d4b21a7871a616209b797_7d5f97e10ca89010b1b0ccd9ef5577c9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:34,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742396_1572 (size=12561) 2024-11-19T12:20:34,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742397_1573 (size=4469) 2024-11-19T12:20:34,246 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:34,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-19T12:20:34,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
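The pid=169/pid=170 entries around this point show the master driving a table flush: a FlushTableProcedure (pid=169) spawning a per-region FlushRegionProcedure (pid=170) that runs FlushRegionCallable on the region server. As a rough, hypothetical sketch only (not taken from this log or from the test code, and assuming an HBase 2.x client reading hbase-site.xml), a client can request the same kind of flush through the public Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical client-side trigger; configuration comes from hbase-site.xml.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush all regions of the table; on current branches
      // this is served by flush procedures like pid=169/pid=170 in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}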
2024-11-19T12:20:34,246 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-19T12:20:34,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:34,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:34,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:34,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:34,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:34,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:34,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196471a0d30bb04ae0815e94b07543dfd7_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018832928/Put/seqid=0 2024-11-19T12:20:34,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742398_1574 (size=12304) 2024-11-19T12:20:34,593 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/eb6f99aecc2549d5aa719be6daf3c92f as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/eb6f99aecc2549d5aa719be6daf3c92f 2024-11-19T12:20:34,594 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#A#compaction#483 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:34,594 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/2e24ce8677ac406da5368d2428f0e65a is 175, key is test_row_0/A:col10/1732018832913/Put/seqid=0 2024-11-19T12:20:34,596 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/B of 7d5f97e10ca89010b1b0ccd9ef5577c9 into eb6f99aecc2549d5aa719be6daf3c92f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:34,596 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:34,596 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/B, priority=13, startTime=1732018834166; duration=0sec 2024-11-19T12:20:34,597 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:34,597 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:B 2024-11-19T12:20:34,597 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:34,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742399_1575 (size=31515) 2024-11-19T12:20:34,605 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:34,606 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/C is initiating minor compaction (all files) 2024-11-19T12:20:34,606 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/C in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:34,606 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/b69a70161abe4a09af2561d3393ab601, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/9af1609ed3424b7a84d56952d0c767fe, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/567d79a8ba7f4ef7939a47283886c6cb] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=35.9 K 2024-11-19T12:20:34,607 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting b69a70161abe4a09af2561d3393ab601, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732018830177 2024-11-19T12:20:34,607 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 9af1609ed3424b7a84d56952d0c767fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732018830688 2024-11-19T12:20:34,607 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 567d79a8ba7f4ef7939a47283886c6cb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732018832305 2024-11-19T12:20:34,614 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#C#compaction#486 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:34,614 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/f9f984f6d3d447449138df062a0d6aaa is 50, key is test_row_0/C:col10/1732018832913/Put/seqid=0 2024-11-19T12:20:34,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742400_1576 (size=12561) 2024-11-19T12:20:34,635 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/f9f984f6d3d447449138df062a0d6aaa as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/f9f984f6d3d447449138df062a0d6aaa 2024-11-19T12:20:34,638 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/C of 7d5f97e10ca89010b1b0ccd9ef5577c9 into f9f984f6d3d447449138df062a0d6aaa(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
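The compaction selections above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", with ExploringCompactionPolicy taking all three candidates) follow the standard store-compaction settings. The following is a small, hypothetical Java sketch of the relevant configuration keys; the values shown are the stock defaults that the log output is consistent with, not anything confirmed to be set by this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fewest eligible files before a minor compaction is proposed; at the
    // default of 3, stores A, B and C above are queued as soon as they
    // accumulate three flushed files.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Most files folded into a single minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Store file count at which writes are blocked; matches "16 blocking" above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}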
2024-11-19T12:20:34,638 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:34,638 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/C, priority=13, startTime=1732018834166; duration=0sec 2024-11-19T12:20:34,639 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:34,639 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:C 2024-11-19T12:20:34,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:34,658 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411196471a0d30bb04ae0815e94b07543dfd7_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196471a0d30bb04ae0815e94b07543dfd7_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:34,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/51c6e99b51eb46d4ae4205469b46c0fe, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:34,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/51c6e99b51eb46d4ae4205469b46c0fe is 175, key is test_row_0/A:col10/1732018832928/Put/seqid=0 2024-11-19T12:20:34,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742401_1577 (size=31105) 2024-11-19T12:20:34,678 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/51c6e99b51eb46d4ae4205469b46c0fe 2024-11-19T12:20:34,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/f0f96a2f472c4f79b1003a1b42479d39 is 50, key is test_row_0/B:col10/1732018832928/Put/seqid=0 2024-11-19T12:20:34,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742402_1578 (size=12151) 2024-11-19T12:20:34,688 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/f0f96a2f472c4f79b1003a1b42479d39 2024-11-19T12:20:34,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/c3b50ae545694db88dab2072ea2fa61e is 50, key is test_row_0/C:col10/1732018832928/Put/seqid=0 2024-11-19T12:20:34,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742403_1579 (size=12151) 2024-11-19T12:20:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:34,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:34,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:34,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:34,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018894855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:34,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018894855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:34,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:34,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018894959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:34,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:34,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018894959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:34,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-19T12:20:35,010 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/2e24ce8677ac406da5368d2428f0e65a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/2e24ce8677ac406da5368d2428f0e65a 2024-11-19T12:20:35,014 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/A of 7d5f97e10ca89010b1b0ccd9ef5577c9 into 2e24ce8677ac406da5368d2428f0e65a(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:35,014 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:35,014 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/A, priority=13, startTime=1732018834166; duration=0sec 2024-11-19T12:20:35,014 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:35,014 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:A 2024-11-19T12:20:35,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:35,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018895053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:35,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:35,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018895054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:35,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:35,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018895061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:35,097 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/c3b50ae545694db88dab2072ea2fa61e 2024-11-19T12:20:35,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/51c6e99b51eb46d4ae4205469b46c0fe as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/51c6e99b51eb46d4ae4205469b46c0fe 2024-11-19T12:20:35,105 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/51c6e99b51eb46d4ae4205469b46c0fe, entries=150, sequenceid=196, filesize=30.4 K 2024-11-19T12:20:35,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/f0f96a2f472c4f79b1003a1b42479d39 as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f0f96a2f472c4f79b1003a1b42479d39 2024-11-19T12:20:35,109 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f0f96a2f472c4f79b1003a1b42479d39, entries=150, sequenceid=196, filesize=11.9 K 2024-11-19T12:20:35,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/c3b50ae545694db88dab2072ea2fa61e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/c3b50ae545694db88dab2072ea2fa61e 2024-11-19T12:20:35,112 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/c3b50ae545694db88dab2072ea2fa61e, entries=150, sequenceid=196, filesize=11.9 K 2024-11-19T12:20:35,113 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 867ms, sequenceid=196, compaction requested=false 2024-11-19T12:20:35,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:35,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
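The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes while the region's memstore sits above its blocking size, which is derived from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier. As a hypothetical illustration only (the HBase client normally retries this internally, and whether the exception surfaces unwrapped depends on client retry settings; the table, row and family names are copied from the log, not from the test code), a caller could back off and retry like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted once the memstore drains below the limit
        } catch (RegionTooBusyException e) {
          // Region is blocking updates because its memstore exceeded
          // flush.size * block.multiplier (reported as 512.0 K above).
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}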
2024-11-19T12:20:35,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-19T12:20:35,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-19T12:20:35,115 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-19T12:20:35,115 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2400 sec 2024-11-19T12:20:35,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 2.2430 sec 2024-11-19T12:20:35,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:35,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-19T12:20:35,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:35,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:35,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:35,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:35,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:35,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:35,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119441f940ca823444bb2ff86157dbaa77a_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018835162/Put/seqid=0 2024-11-19T12:20:35,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742404_1580 (size=12304) 2024-11-19T12:20:35,172 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:35,176 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119441f940ca823444bb2ff86157dbaa77a_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119441f940ca823444bb2ff86157dbaa77a_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:35,177 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/ed821c4d3a4b4ee288da014bc818f70b, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:35,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/ed821c4d3a4b4ee288da014bc818f70b is 175, key is test_row_0/A:col10/1732018835162/Put/seqid=0 2024-11-19T12:20:35,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742405_1581 (size=31105) 2024-11-19T12:20:35,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:35,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018895186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:35,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:35,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018895187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:35,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:35,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018895290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:35,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:35,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018895290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:35,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:35,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018895492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:35,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:35,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018895493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:35,581 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=216, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/ed821c4d3a4b4ee288da014bc818f70b 2024-11-19T12:20:35,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/3b8a1850e4fd4031984147f724f5d34b is 50, key is test_row_0/B:col10/1732018835162/Put/seqid=0 2024-11-19T12:20:35,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742406_1582 (size=12151) 2024-11-19T12:20:35,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:35,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018895794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:35,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:35,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018895795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:35,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/3b8a1850e4fd4031984147f724f5d34b 2024-11-19T12:20:35,998 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/2cad64868a364639b583990e8d9a4cf8 is 50, key is test_row_0/C:col10/1732018835162/Put/seqid=0 2024-11-19T12:20:36,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742407_1583 (size=12151) 2024-11-19T12:20:36,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:36,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018896301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:36,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:36,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018896301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:36,402 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/2cad64868a364639b583990e8d9a4cf8 2024-11-19T12:20:36,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/ed821c4d3a4b4ee288da014bc818f70b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/ed821c4d3a4b4ee288da014bc818f70b 2024-11-19T12:20:36,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/ed821c4d3a4b4ee288da014bc818f70b, entries=150, sequenceid=216, filesize=30.4 K 2024-11-19T12:20:36,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/3b8a1850e4fd4031984147f724f5d34b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/3b8a1850e4fd4031984147f724f5d34b 2024-11-19T12:20:36,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/3b8a1850e4fd4031984147f724f5d34b, entries=150, sequenceid=216, filesize=11.9 K 2024-11-19T12:20:36,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/2cad64868a364639b583990e8d9a4cf8 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/2cad64868a364639b583990e8d9a4cf8 2024-11-19T12:20:36,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/2cad64868a364639b583990e8d9a4cf8, entries=150, sequenceid=216, filesize=11.9 K 2024-11-19T12:20:36,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1252ms, sequenceid=216, compaction requested=true 2024-11-19T12:20:36,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:36,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:20:36,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:36,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:20:36,415 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:36,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:36,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:20:36,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:36,415 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:36,416 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:36,416 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:36,416 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/B is initiating minor compaction (all files) 2024-11-19T12:20:36,416 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/A is initiating minor compaction (all files) 2024-11-19T12:20:36,416 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/B in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:36,416 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/A in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:36,416 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/eb6f99aecc2549d5aa719be6daf3c92f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f0f96a2f472c4f79b1003a1b42479d39, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/3b8a1850e4fd4031984147f724f5d34b] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=36.0 K 2024-11-19T12:20:36,416 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/2e24ce8677ac406da5368d2428f0e65a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/51c6e99b51eb46d4ae4205469b46c0fe, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/ed821c4d3a4b4ee288da014bc818f70b] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=91.5 K 2024-11-19T12:20:36,416 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:36,416 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/2e24ce8677ac406da5368d2428f0e65a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/51c6e99b51eb46d4ae4205469b46c0fe, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/ed821c4d3a4b4ee288da014bc818f70b] 2024-11-19T12:20:36,416 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting eb6f99aecc2549d5aa719be6daf3c92f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732018832305 2024-11-19T12:20:36,417 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting f0f96a2f472c4f79b1003a1b42479d39, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732018832928 2024-11-19T12:20:36,417 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e24ce8677ac406da5368d2428f0e65a, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732018832305 2024-11-19T12:20:36,417 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b8a1850e4fd4031984147f724f5d34b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732018834851 2024-11-19T12:20:36,417 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51c6e99b51eb46d4ae4205469b46c0fe, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732018832928 2024-11-19T12:20:36,417 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed821c4d3a4b4ee288da014bc818f70b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732018834851 2024-11-19T12:20:36,422 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:36,424 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411195aba552656524c469f402c61b963c1fa_7d5f97e10ca89010b1b0ccd9ef5577c9 store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:36,425 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#B#compaction#493 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:36,426 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/391b96cc76b6447fb320c62627771973 is 50, key is test_row_0/B:col10/1732018835162/Put/seqid=0 2024-11-19T12:20:36,426 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411195aba552656524c469f402c61b963c1fa_7d5f97e10ca89010b1b0ccd9ef5577c9, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:36,426 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411195aba552656524c469f402c61b963c1fa_7d5f97e10ca89010b1b0ccd9ef5577c9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:36,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742408_1584 (size=12663) 2024-11-19T12:20:36,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742409_1585 (size=4469) 2024-11-19T12:20:36,441 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#A#compaction#492 average throughput is 1.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:36,442 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/376ee4f61bbf46539f244fbd5baeff1b is 175, key is test_row_0/A:col10/1732018835162/Put/seqid=0 2024-11-19T12:20:36,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742410_1586 (size=31617) 2024-11-19T12:20:36,833 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/391b96cc76b6447fb320c62627771973 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/391b96cc76b6447fb320c62627771973 2024-11-19T12:20:36,837 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/B of 7d5f97e10ca89010b1b0ccd9ef5577c9 into 391b96cc76b6447fb320c62627771973(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:36,837 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:36,837 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/B, priority=13, startTime=1732018836415; duration=0sec 2024-11-19T12:20:36,837 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:36,837 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:B 2024-11-19T12:20:36,838 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:36,838 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:36,838 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/C is initiating minor compaction (all files) 2024-11-19T12:20:36,839 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/C in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:36,839 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/f9f984f6d3d447449138df062a0d6aaa, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/c3b50ae545694db88dab2072ea2fa61e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/2cad64868a364639b583990e8d9a4cf8] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=36.0 K 2024-11-19T12:20:36,839 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting f9f984f6d3d447449138df062a0d6aaa, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732018832305 2024-11-19T12:20:36,839 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting c3b50ae545694db88dab2072ea2fa61e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732018832928 2024-11-19T12:20:36,840 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cad64868a364639b583990e8d9a4cf8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732018834851 2024-11-19T12:20:36,845 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7d5f97e10ca89010b1b0ccd9ef5577c9#C#compaction#494 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:36,845 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/24e9f243790f4523a7a176204505adda is 50, key is test_row_0/C:col10/1732018835162/Put/seqid=0 2024-11-19T12:20:36,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742411_1587 (size=12663) 2024-11-19T12:20:36,850 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/376ee4f61bbf46539f244fbd5baeff1b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/376ee4f61bbf46539f244fbd5baeff1b 2024-11-19T12:20:36,853 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/24e9f243790f4523a7a176204505adda as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/24e9f243790f4523a7a176204505adda 2024-11-19T12:20:36,853 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/A of 7d5f97e10ca89010b1b0ccd9ef5577c9 into 376ee4f61bbf46539f244fbd5baeff1b(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:36,853 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:36,853 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/A, priority=13, startTime=1732018836415; duration=0sec 2024-11-19T12:20:36,853 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:36,853 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:A 2024-11-19T12:20:36,856 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/C of 7d5f97e10ca89010b1b0ccd9ef5577c9 into 24e9f243790f4523a7a176204505adda(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:36,856 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:36,856 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/C, priority=13, startTime=1732018836415; duration=0sec 2024-11-19T12:20:36,856 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:36,856 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:C 2024-11-19T12:20:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-19T12:20:36,978 INFO [Thread-2341 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-19T12:20:36,979 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:20:36,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-19T12:20:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-19T12:20:36,981 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:20:36,981 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:20:36,981 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:20:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:37,060 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-19T12:20:37,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:37,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:37,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:37,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:37,060 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:37,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:37,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111922317a31fb5a478ca1bdfab5f8d5d87a_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018835185/Put/seqid=0 2024-11-19T12:20:37,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742412_1588 (size=12304) 2024-11-19T12:20:37,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018897074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018897075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018897075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-19T12:20:37,133 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:37,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:37,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:37,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:37,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:37,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:37,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018897177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018897177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018897178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-19T12:20:37,285 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:37,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:37,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:37,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:37,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:37,286 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:37,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018897310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018897311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018897379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018897379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018897379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,438 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:37,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:37,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:37,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:37,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:37,438 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:37,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,469 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:37,473 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111922317a31fb5a478ca1bdfab5f8d5d87a_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111922317a31fb5a478ca1bdfab5f8d5d87a_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:37,473 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/6fda24bfe30a474b93887479041b2a18, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:37,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/6fda24bfe30a474b93887479041b2a18 is 175, key is test_row_0/A:col10/1732018835185/Put/seqid=0 2024-11-19T12:20:37,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742413_1589 (size=31105) 2024-11-19T12:20:37,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-19T12:20:37,590 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:37,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:37,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:37,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:37,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:37,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018897681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018897682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:37,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018897682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:37,742 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:37,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:37,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:37,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:37,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:37,743 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,878 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=239, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/6fda24bfe30a474b93887479041b2a18 2024-11-19T12:20:37,884 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/1f9f4ec002634e828c9155d35cc97e4a is 50, key is test_row_0/B:col10/1732018835185/Put/seqid=0 2024-11-19T12:20:37,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742414_1590 (size=12151) 2024-11-19T12:20:37,894 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:37,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:37,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:37,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:37,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:37,895 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:37,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,047 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:38,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:38,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:38,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:38,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:38,047 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-19T12:20:38,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:38,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018898186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:38,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:38,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018898187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:38,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:38,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018898187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:38,199 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:38,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:38,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:38,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:38,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:38,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/1f9f4ec002634e828c9155d35cc97e4a 2024-11-19T12:20:38,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/69a73903de5845bdabd2e31f7375ee03 is 50, key is test_row_0/C:col10/1732018835185/Put/seqid=0 2024-11-19T12:20:38,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742415_1591 (size=12151) 2024-11-19T12:20:38,351 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:38,352 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:38,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:38,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:38,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:38,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,504 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:38,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:38,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:38,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:38,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:38,505 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,656 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:38,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:38,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:38,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:38,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:38,657 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:38,699 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/69a73903de5845bdabd2e31f7375ee03 2024-11-19T12:20:38,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/6fda24bfe30a474b93887479041b2a18 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/6fda24bfe30a474b93887479041b2a18 2024-11-19T12:20:38,705 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/6fda24bfe30a474b93887479041b2a18, entries=150, sequenceid=239, filesize=30.4 K 2024-11-19T12:20:38,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/1f9f4ec002634e828c9155d35cc97e4a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1f9f4ec002634e828c9155d35cc97e4a 2024-11-19T12:20:38,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1f9f4ec002634e828c9155d35cc97e4a, entries=150, 
sequenceid=239, filesize=11.9 K 2024-11-19T12:20:38,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/69a73903de5845bdabd2e31f7375ee03 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/69a73903de5845bdabd2e31f7375ee03 2024-11-19T12:20:38,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/69a73903de5845bdabd2e31f7375ee03, entries=150, sequenceid=239, filesize=11.9 K 2024-11-19T12:20:38,713 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1653ms, sequenceid=239, compaction requested=false 2024-11-19T12:20:38,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:38,808 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:38,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-19T12:20:38,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:38,809 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-19T12:20:38,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:38,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:38,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:38,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:38,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:38,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:38,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111975b545ff57804405817499837e06c0bb_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018837074/Put/seqid=0 2024-11-19T12:20:38,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742416_1592 (size=12304) 2024-11-19T12:20:38,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:38,824 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111975b545ff57804405817499837e06c0bb_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111975b545ff57804405817499837e06c0bb_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:38,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/a99a59d768064fa19a35146c6e1f1247, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:38,826 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/a99a59d768064fa19a35146c6e1f1247 is 175, key is test_row_0/A:col10/1732018837074/Put/seqid=0 2024-11-19T12:20:38,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742417_1593 (size=31105) 2024-11-19T12:20:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-19T12:20:39,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:39,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:39,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018899206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018899208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018899209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,230 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/a99a59d768064fa19a35146c6e1f1247 2024-11-19T12:20:39,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/8a6bea32c68f49dab3a0ed3304c1d3f1 is 50, key is test_row_0/B:col10/1732018837074/Put/seqid=0 2024-11-19T12:20:39,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742418_1594 (size=12151) 2024-11-19T12:20:39,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018899310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018899311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018899312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018899321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,322 DEBUG [Thread-2331 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., hostname=af314c41f984,36047,1732018661455, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:20:39,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018899326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,328 DEBUG [Thread-2337 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., hostname=af314c41f984,36047,1732018661455, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:20:39,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018899513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018899514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018899514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,646 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/8a6bea32c68f49dab3a0ed3304c1d3f1 2024-11-19T12:20:39,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/5c700ac7893046ffb62e0ec5b3b4ebe3 is 50, key is test_row_0/C:col10/1732018837074/Put/seqid=0 2024-11-19T12:20:39,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742419_1595 (size=12151) 2024-11-19T12:20:39,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018899816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018899816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:39,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018899816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:39,928 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T12:20:40,056 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/5c700ac7893046ffb62e0ec5b3b4ebe3 2024-11-19T12:20:40,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/a99a59d768064fa19a35146c6e1f1247 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/a99a59d768064fa19a35146c6e1f1247 2024-11-19T12:20:40,063 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/a99a59d768064fa19a35146c6e1f1247, entries=150, sequenceid=255, filesize=30.4 K 2024-11-19T12:20:40,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/8a6bea32c68f49dab3a0ed3304c1d3f1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/8a6bea32c68f49dab3a0ed3304c1d3f1 2024-11-19T12:20:40,066 INFO 
[RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/8a6bea32c68f49dab3a0ed3304c1d3f1, entries=150, sequenceid=255, filesize=11.9 K 2024-11-19T12:20:40,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/5c700ac7893046ffb62e0ec5b3b4ebe3 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/5c700ac7893046ffb62e0ec5b3b4ebe3 2024-11-19T12:20:40,069 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/5c700ac7893046ffb62e0ec5b3b4ebe3, entries=150, sequenceid=255, filesize=11.9 K 2024-11-19T12:20:40,070 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1261ms, sequenceid=255, compaction requested=true 2024-11-19T12:20:40,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:40,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:40,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-19T12:20:40,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-19T12:20:40,072 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-19T12:20:40,072 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0900 sec 2024-11-19T12:20:40,073 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 3.0920 sec 2024-11-19T12:20:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:40,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-19T12:20:40,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:40,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:40,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:40,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:40,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:40,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:40,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119d33cf989ae654a8d96191278031089ef_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018839208/Put/seqid=0 2024-11-19T12:20:40,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742420_1596 (size=14994) 2024-11-19T12:20:40,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018900335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:40,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018900335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:40,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018900336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:40,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018900438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:40,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018900438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:40,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018900439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:40,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018900641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:40,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018900641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:40,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018900642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:40,733 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:40,736 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119d33cf989ae654a8d96191278031089ef_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d33cf989ae654a8d96191278031089ef_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:40,737 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/52ee9056ae9f49bf8f73019c9f692c20, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:40,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/52ee9056ae9f49bf8f73019c9f692c20 is 175, key is test_row_0/A:col10/1732018839208/Put/seqid=0 2024-11-19T12:20:40,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742421_1597 (size=39949) 2024-11-19T12:20:40,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018900943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:40,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018900944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:40,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:40,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018900946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:41,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-19T12:20:41,085 INFO [Thread-2341 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-19T12:20:41,086 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-19T12:20:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-19T12:20:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-19T12:20:41,087 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:20:41,088 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:20:41,088 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:20:41,142 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/52ee9056ae9f49bf8f73019c9f692c20 2024-11-19T12:20:41,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/17b21b71a998409180f2f4c9f9fa2ec2 is 50, key is test_row_0/B:col10/1732018839208/Put/seqid=0 2024-11-19T12:20:41,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742422_1598 (size=12301) 2024-11-19T12:20:41,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-19T12:20:41,239 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:41,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-19T12:20:41,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:41,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:20:41,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-19T12:20:41,392 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:41,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-19T12:20:41,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:41,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:41,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018901448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:41,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:41,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018901448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:41,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:41,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018901450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:41,544 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:41,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-19T12:20:41,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:41,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,545 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,552 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/17b21b71a998409180f2f4c9f9fa2ec2 2024-11-19T12:20:41,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/246addb4c195474e9e32d1afc59d459d is 50, key is test_row_0/C:col10/1732018839208/Put/seqid=0 2024-11-19T12:20:41,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742423_1599 (size=12301) 2024-11-19T12:20:41,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-19T12:20:41,696 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:41,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-19T12:20:41,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:41,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,697 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,849 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:41,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-19T12:20:41,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:41,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:20:41,962 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/246addb4c195474e9e32d1afc59d459d 2024-11-19T12:20:41,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/52ee9056ae9f49bf8f73019c9f692c20 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/52ee9056ae9f49bf8f73019c9f692c20 2024-11-19T12:20:41,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/52ee9056ae9f49bf8f73019c9f692c20, entries=200, sequenceid=276, filesize=39.0 K 2024-11-19T12:20:41,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/17b21b71a998409180f2f4c9f9fa2ec2 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/17b21b71a998409180f2f4c9f9fa2ec2 2024-11-19T12:20:41,971 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/17b21b71a998409180f2f4c9f9fa2ec2, entries=150, 
sequenceid=276, filesize=12.0 K 2024-11-19T12:20:41,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/246addb4c195474e9e32d1afc59d459d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/246addb4c195474e9e32d1afc59d459d 2024-11-19T12:20:41,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/246addb4c195474e9e32d1afc59d459d, entries=150, sequenceid=276, filesize=12.0 K 2024-11-19T12:20:41,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1653ms, sequenceid=276, compaction requested=true 2024-11-19T12:20:41,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:41,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:20:41,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:41,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:20:41,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:41,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:20:41,976 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:20:41,976 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:20:41,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:41,976 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:20:41,976 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133776 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:20:41,977 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] 
regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/B is initiating minor compaction (all files) 2024-11-19T12:20:41,977 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/A is initiating minor compaction (all files) 2024-11-19T12:20:41,977 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/B in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,977 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/A in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,977 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/391b96cc76b6447fb320c62627771973, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1f9f4ec002634e828c9155d35cc97e4a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/8a6bea32c68f49dab3a0ed3304c1d3f1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/17b21b71a998409180f2f4c9f9fa2ec2] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=48.1 K 2024-11-19T12:20:41,977 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/376ee4f61bbf46539f244fbd5baeff1b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/6fda24bfe30a474b93887479041b2a18, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/a99a59d768064fa19a35146c6e1f1247, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/52ee9056ae9f49bf8f73019c9f692c20] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=130.6 K 2024-11-19T12:20:41,977 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:41,977 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/376ee4f61bbf46539f244fbd5baeff1b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/6fda24bfe30a474b93887479041b2a18, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/a99a59d768064fa19a35146c6e1f1247, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/52ee9056ae9f49bf8f73019c9f692c20] 2024-11-19T12:20:41,977 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 376ee4f61bbf46539f244fbd5baeff1b, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732018834851 2024-11-19T12:20:41,977 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 391b96cc76b6447fb320c62627771973, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732018834851 2024-11-19T12:20:41,978 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6fda24bfe30a474b93887479041b2a18, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732018835185 2024-11-19T12:20:41,978 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f9f4ec002634e828c9155d35cc97e4a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732018835185 2024-11-19T12:20:41,978 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting a99a59d768064fa19a35146c6e1f1247, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732018837071 2024-11-19T12:20:41,978 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a6bea32c68f49dab3a0ed3304c1d3f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732018837071 2024-11-19T12:20:41,978 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52ee9056ae9f49bf8f73019c9f692c20, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732018839203 2024-11-19T12:20:41,978 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 17b21b71a998409180f2f4c9f9fa2ec2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732018839207 2024-11-19T12:20:41,984 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#B#compaction#504 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:41,985 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/73833ec2a1744e888522ca3c241dcbcc is 50, key is test_row_0/B:col10/1732018839208/Put/seqid=0 2024-11-19T12:20:41,986 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:41,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742424_1600 (size=12949) 2024-11-19T12:20:41,988 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241119433f2e06c00f4ee58e5d19f04d35e91a_7d5f97e10ca89010b1b0ccd9ef5577c9 store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:41,991 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241119433f2e06c00f4ee58e5d19f04d35e91a_7d5f97e10ca89010b1b0ccd9ef5577c9, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:41,991 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119433f2e06c00f4ee58e5d19f04d35e91a_7d5f97e10ca89010b1b0ccd9ef5577c9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:41,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742425_1601 (size=4469) 2024-11-19T12:20:41,999 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#A#compaction#505 average throughput is 1.88 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:42,000 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/1dec924a26ed4a18968e0ff31b965888 is 175, key is test_row_0/A:col10/1732018839208/Put/seqid=0 2024-11-19T12:20:42,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:42,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36047 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-19T12:20:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:42,002 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-19T12:20:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:42,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742426_1602 (size=31903) 2024-11-19T12:20:42,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411194ef84fa10c0a4bf0aecc79761e52875d_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018840334/Put/seqid=0 2024-11-19T12:20:42,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742427_1603 (size=12454) 2024-11-19T12:20:42,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=173 2024-11-19T12:20:42,392 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/73833ec2a1744e888522ca3c241dcbcc as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/73833ec2a1744e888522ca3c241dcbcc 2024-11-19T12:20:42,396 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/B of 7d5f97e10ca89010b1b0ccd9ef5577c9 into 73833ec2a1744e888522ca3c241dcbcc(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:42,396 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:42,396 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/B, priority=12, startTime=1732018841975; duration=0sec 2024-11-19T12:20:42,396 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:42,396 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:B 2024-11-19T12:20:42,396 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T12:20:42,397 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T12:20:42,397 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/C is initiating minor compaction (all files) 2024-11-19T12:20:42,397 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/C in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:42,398 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/24e9f243790f4523a7a176204505adda, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/69a73903de5845bdabd2e31f7375ee03, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/5c700ac7893046ffb62e0ec5b3b4ebe3, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/246addb4c195474e9e32d1afc59d459d] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=48.1 K 2024-11-19T12:20:42,398 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 24e9f243790f4523a7a176204505adda, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732018834851 2024-11-19T12:20:42,398 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 69a73903de5845bdabd2e31f7375ee03, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732018835185 2024-11-19T12:20:42,399 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c700ac7893046ffb62e0ec5b3b4ebe3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732018837071 2024-11-19T12:20:42,399 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 246addb4c195474e9e32d1afc59d459d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732018839207 2024-11-19T12:20:42,405 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#C#compaction#507 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:42,406 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/74152bbacdb346f4b31e8d0b32e9589a is 50, key is test_row_0/C:col10/1732018839208/Put/seqid=0 2024-11-19T12:20:42,408 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/1dec924a26ed4a18968e0ff31b965888 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1dec924a26ed4a18968e0ff31b965888 2024-11-19T12:20:42,411 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/A of 7d5f97e10ca89010b1b0ccd9ef5577c9 into 1dec924a26ed4a18968e0ff31b965888(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:42,411 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:42,411 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/A, priority=12, startTime=1732018841975; duration=0sec 2024-11-19T12:20:42,411 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:42,411 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:A 2024-11-19T12:20:42,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742428_1604 (size=12949) 2024-11-19T12:20:42,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:42,420 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411194ef84fa10c0a4bf0aecc79761e52875d_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411194ef84fa10c0a4bf0aecc79761e52875d_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:42,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/76c698adcee84f4382ac40bde882a1ed, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:42,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/76c698adcee84f4382ac40bde882a1ed is 175, key is test_row_0/A:col10/1732018840334/Put/seqid=0 2024-11-19T12:20:42,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742429_1605 (size=31255) 2024-11-19T12:20:42,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. as already flushing 2024-11-19T12:20:42,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:42,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018902468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:42,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:42,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018902469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:42,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:42,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018902471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:42,493 DEBUG [Thread-2346 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1ca17819 to 127.0.0.1:64186 2024-11-19T12:20:42,493 DEBUG [Thread-2346 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:42,493 DEBUG [Thread-2344 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4aa4b067 to 127.0.0.1:64186 2024-11-19T12:20:42,493 DEBUG [Thread-2344 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:42,493 DEBUG [Thread-2342 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3852b0e3 to 127.0.0.1:64186 2024-11-19T12:20:42,494 DEBUG [Thread-2342 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:42,495 DEBUG [Thread-2348 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77a6a62c to 127.0.0.1:64186 2024-11-19T12:20:42,495 DEBUG [Thread-2350 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x125099a6 to 127.0.0.1:64186 2024-11-19T12:20:42,495 DEBUG [Thread-2348 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:42,495 DEBUG [Thread-2350 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:42,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:42,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:42,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018902571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:42,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018902571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:42,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:42,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018902573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:42,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:42,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018902772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:42,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:42,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018902773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:42,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:42,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018902774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:42,817 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/74152bbacdb346f4b31e8d0b32e9589a as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/74152bbacdb346f4b31e8d0b32e9589a 2024-11-19T12:20:42,820 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/C of 7d5f97e10ca89010b1b0ccd9ef5577c9 into 74152bbacdb346f4b31e8d0b32e9589a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:42,820 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:42,820 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/C, priority=12, startTime=1732018841976; duration=0sec 2024-11-19T12:20:42,820 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:42,820 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:C 2024-11-19T12:20:42,824 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/76c698adcee84f4382ac40bde882a1ed 2024-11-19T12:20:42,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/9de218652feb4fcd8a32ee9b4f5d2560 is 50, key is test_row_0/B:col10/1732018840334/Put/seqid=0 2024-11-19T12:20:42,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742430_1606 (size=12301) 2024-11-19T12:20:43,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:43,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018903075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:43,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:43,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018903075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:43,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:43,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018903076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:43,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-19T12:20:43,232 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/9de218652feb4fcd8a32ee9b4f5d2560 2024-11-19T12:20:43,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/29685e5271c04813a03a17c8eb49667b is 50, key is test_row_0/C:col10/1732018840334/Put/seqid=0 2024-11-19T12:20:43,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742431_1607 (size=12301) 2024-11-19T12:20:43,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:43,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48938 deadline: 1732018903338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:43,338 DEBUG [Thread-2337 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., hostname=af314c41f984,36047,1732018661455, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:20:43,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:43,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49018 deadline: 1732018903343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:43,344 DEBUG [Thread-2331 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., hostname=af314c41f984,36047,1732018661455, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:20:43,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:43,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48956 deadline: 1732018903577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:43,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:43,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49000 deadline: 1732018903579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:43,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-19T12:20:43,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48968 deadline: 1732018903579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 2024-11-19T12:20:43,640 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/29685e5271c04813a03a17c8eb49667b 2024-11-19T12:20:43,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/76c698adcee84f4382ac40bde882a1ed as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/76c698adcee84f4382ac40bde882a1ed 2024-11-19T12:20:43,645 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/76c698adcee84f4382ac40bde882a1ed, entries=150, sequenceid=291, filesize=30.5 K 2024-11-19T12:20:43,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/9de218652feb4fcd8a32ee9b4f5d2560 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/9de218652feb4fcd8a32ee9b4f5d2560 2024-11-19T12:20:43,648 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/9de218652feb4fcd8a32ee9b4f5d2560, entries=150, sequenceid=291, filesize=12.0 K 2024-11-19T12:20:43,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/29685e5271c04813a03a17c8eb49667b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/29685e5271c04813a03a17c8eb49667b 2024-11-19T12:20:43,651 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/29685e5271c04813a03a17c8eb49667b, entries=150, sequenceid=291, filesize=12.0 K 2024-11-19T12:20:43,651 INFO [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1649ms, sequenceid=291, compaction requested=false 2024-11-19T12:20:43,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:43,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:43,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/af314c41f984:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-19T12:20:43,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-19T12:20:43,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-19T12:20:43,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5640 sec 2024-11-19T12:20:43,654 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 2.5670 sec 2024-11-19T12:20:44,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36047 {}] regionserver.HRegion(8581): Flush requested on 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:44,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-19T12:20:44,584 DEBUG [Thread-2339 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x205568ef to 127.0.0.1:64186 2024-11-19T12:20:44,584 DEBUG [Thread-2339 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:44,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:44,584 DEBUG [Thread-2333 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4949adfa to 127.0.0.1:64186 2024-11-19T12:20:44,584 DEBUG [Thread-2333 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:44,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:44,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:44,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:44,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:44,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:44,585 DEBUG [Thread-2335 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53ef82c4 to 127.0.0.1:64186 2024-11-19T12:20:44,586 DEBUG [Thread-2335 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:44,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119d8fc2a5a83704925a101ce286dc58b0d_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018844582/Put/seqid=0 2024-11-19T12:20:44,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742432_1608 (size=12454) 2024-11-19T12:20:44,994 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:44,996 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119d8fc2a5a83704925a101ce286dc58b0d_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d8fc2a5a83704925a101ce286dc58b0d_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:44,997 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/1bf5474f27ed44648db1878915998d73, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:44,997 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/1bf5474f27ed44648db1878915998d73 is 175, key is test_row_0/A:col10/1732018844582/Put/seqid=0 2024-11-19T12:20:45,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742433_1609 (size=31255) 2024-11-19T12:20:45,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-19T12:20:45,191 INFO [Thread-2341 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-19T12:20:45,400 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is 
flushed, sequenceid=317, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/1bf5474f27ed44648db1878915998d73 2024-11-19T12:20:45,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/f289d43d0767496daa699b6fad0212a7 is 50, key is test_row_0/B:col10/1732018844582/Put/seqid=0 2024-11-19T12:20:45,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742434_1610 (size=12301) 2024-11-19T12:20:45,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/f289d43d0767496daa699b6fad0212a7 2024-11-19T12:20:45,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/aa0eae2ae013401c8c5125e33b8ae129 is 50, key is test_row_0/C:col10/1732018844582/Put/seqid=0 2024-11-19T12:20:45,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742435_1611 (size=12301) 2024-11-19T12:20:46,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/aa0eae2ae013401c8c5125e33b8ae129 2024-11-19T12:20:46,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/1bf5474f27ed44648db1878915998d73 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1bf5474f27ed44648db1878915998d73 2024-11-19T12:20:46,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1bf5474f27ed44648db1878915998d73, entries=150, sequenceid=317, filesize=30.5 K 2024-11-19T12:20:46,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/f289d43d0767496daa699b6fad0212a7 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f289d43d0767496daa699b6fad0212a7 2024-11-19T12:20:46,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f289d43d0767496daa699b6fad0212a7, entries=150, sequenceid=317, filesize=12.0 K 2024-11-19T12:20:46,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/aa0eae2ae013401c8c5125e33b8ae129 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/aa0eae2ae013401c8c5125e33b8ae129 2024-11-19T12:20:46,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/aa0eae2ae013401c8c5125e33b8ae129, entries=150, sequenceid=317, filesize=12.0 K 2024-11-19T12:20:46,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=6.71 KB/6870 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1646ms, sequenceid=317, compaction requested=true 2024-11-19T12:20:46,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:46,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:A, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:20:46,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:46,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:B, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:20:46,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:46,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7d5f97e10ca89010b1b0ccd9ef5577c9:C, priority=-2147483648, current under compaction store size is 3 2024-11-19T12:20:46,229 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:46,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:46,229 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:46,230 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:46,230 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:46,230 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/A is initiating minor compaction (all files) 2024-11-19T12:20:46,230 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/B is initiating minor compaction (all files) 2024-11-19T12:20:46,230 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/A in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:46,230 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/B in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:46,230 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1dec924a26ed4a18968e0ff31b965888, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/76c698adcee84f4382ac40bde882a1ed, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1bf5474f27ed44648db1878915998d73] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=92.2 K 2024-11-19T12:20:46,230 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/73833ec2a1744e888522ca3c241dcbcc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/9de218652feb4fcd8a32ee9b4f5d2560, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f289d43d0767496daa699b6fad0212a7] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=36.7 K 2024-11-19T12:20:46,230 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:46,230 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
files: [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1dec924a26ed4a18968e0ff31b965888, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/76c698adcee84f4382ac40bde882a1ed, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1bf5474f27ed44648db1878915998d73] 2024-11-19T12:20:46,230 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 73833ec2a1744e888522ca3c241dcbcc, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732018839207 2024-11-19T12:20:46,230 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dec924a26ed4a18968e0ff31b965888, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732018839207 2024-11-19T12:20:46,230 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting 9de218652feb4fcd8a32ee9b4f5d2560, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732018840334 2024-11-19T12:20:46,230 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76c698adcee84f4382ac40bde882a1ed, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732018840334 2024-11-19T12:20:46,231 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] compactions.Compactor(224): Compacting f289d43d0767496daa699b6fad0212a7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732018842470 2024-11-19T12:20:46,231 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bf5474f27ed44648db1878915998d73, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732018842470 2024-11-19T12:20:46,236 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#B#compaction#513 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:46,237 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/abc243d5c88c4baf9398b9d468a51987 is 50, key is test_row_0/B:col10/1732018844582/Put/seqid=0 2024-11-19T12:20:46,239 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:46,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742436_1612 (size=13051) 2024-11-19T12:20:46,240 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241119dd2a22cfbe0e49549e8d5a9263ec6264_7d5f97e10ca89010b1b0ccd9ef5577c9 store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:46,254 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241119dd2a22cfbe0e49549e8d5a9263ec6264_7d5f97e10ca89010b1b0ccd9ef5577c9, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:46,255 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241119dd2a22cfbe0e49549e8d5a9263ec6264_7d5f97e10ca89010b1b0ccd9ef5577c9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:46,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742437_1613 (size=4469) 2024-11-19T12:20:46,259 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#A#compaction#514 average throughput is 1.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:46,259 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/d48a67cc9db54a63ab761e1f31851ea6 is 175, key is test_row_0/A:col10/1732018844582/Put/seqid=0 2024-11-19T12:20:46,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742438_1614 (size=32005) 2024-11-19T12:20:46,268 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/d48a67cc9db54a63ab761e1f31851ea6 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/d48a67cc9db54a63ab761e1f31851ea6 2024-11-19T12:20:46,272 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/A of 7d5f97e10ca89010b1b0ccd9ef5577c9 into d48a67cc9db54a63ab761e1f31851ea6(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:46,272 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:46,272 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/A, priority=13, startTime=1732018846229; duration=0sec 2024-11-19T12:20:46,272 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-19T12:20:46,272 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:A 2024-11-19T12:20:46,272 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:20:46,273 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:20:46,273 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1540): 7d5f97e10ca89010b1b0ccd9ef5577c9/C is initiating minor compaction (all files) 2024-11-19T12:20:46,273 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7d5f97e10ca89010b1b0ccd9ef5577c9/C in TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:46,273 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/74152bbacdb346f4b31e8d0b32e9589a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/29685e5271c04813a03a17c8eb49667b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/aa0eae2ae013401c8c5125e33b8ae129] into tmpdir=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp, totalSize=36.7 K 2024-11-19T12:20:46,273 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74152bbacdb346f4b31e8d0b32e9589a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732018839207 2024-11-19T12:20:46,274 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29685e5271c04813a03a17c8eb49667b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732018840334 2024-11-19T12:20:46,274 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa0eae2ae013401c8c5125e33b8ae129, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732018842470 2024-11-19T12:20:46,280 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7d5f97e10ca89010b1b0ccd9ef5577c9#C#compaction#515 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:20:46,281 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/7bf165dee3614851aea9126b7db06dd1 is 50, key is test_row_0/C:col10/1732018844582/Put/seqid=0 2024-11-19T12:20:46,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742439_1615 (size=13051) 2024-11-19T12:20:46,643 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/abc243d5c88c4baf9398b9d468a51987 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/abc243d5c88c4baf9398b9d468a51987 2024-11-19T12:20:46,646 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/B of 7d5f97e10ca89010b1b0ccd9ef5577c9 into abc243d5c88c4baf9398b9d468a51987(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:20:46,646 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:46,646 INFO [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/B, priority=13, startTime=1732018846229; duration=0sec 2024-11-19T12:20:46,646 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:46,646 DEBUG [RS:0;af314c41f984:36047-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:B 2024-11-19T12:20:46,687 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/7bf165dee3614851aea9126b7db06dd1 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/7bf165dee3614851aea9126b7db06dd1 2024-11-19T12:20:46,690 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7d5f97e10ca89010b1b0ccd9ef5577c9/C of 7d5f97e10ca89010b1b0ccd9ef5577c9 into 7bf165dee3614851aea9126b7db06dd1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:20:46,690 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:46,690 INFO [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9., storeName=7d5f97e10ca89010b1b0ccd9ef5577c9/C, priority=13, startTime=1732018846229; duration=0sec 2024-11-19T12:20:46,690 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:20:46,690 DEBUG [RS:0;af314c41f984:36047-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7d5f97e10ca89010b1b0ccd9ef5577c9:C 2024-11-19T12:20:53,371 DEBUG [Thread-2331 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31f7586d to 127.0.0.1:64186 2024-11-19T12:20:53,371 DEBUG [Thread-2331 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:53,410 DEBUG [Thread-2337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0672325a to 127.0.0.1:64186 2024-11-19T12:20:53,410 DEBUG [Thread-2337 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 28 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7831 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7563 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7437 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7805 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7561 2024-11-19T12:20:53,410 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-19T12:20:53,410 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-19T12:20:53,410 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4bbf3c1c to 127.0.0.1:64186 2024-11-19T12:20:53,410 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:53,411 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-19T12:20:53,411 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-19T12:20:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:53,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-19T12:20:53,413 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018853413"}]},"ts":"1732018853413"} 2024-11-19T12:20:53,414 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-19T12:20:53,416 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-19T12:20:53,416 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-19T12:20:53,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=176, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7d5f97e10ca89010b1b0ccd9ef5577c9, UNASSIGN}] 2024-11-19T12:20:53,417 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=177, ppid=176, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7d5f97e10ca89010b1b0ccd9ef5577c9, UNASSIGN 2024-11-19T12:20:53,418 INFO [PEWorker-1 
{}] assignment.RegionStateStore(202): pid=177 updating hbase:meta row=7d5f97e10ca89010b1b0ccd9ef5577c9, regionState=CLOSING, regionLocation=af314c41f984,36047,1732018661455 2024-11-19T12:20:53,419 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-19T12:20:53,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; CloseRegionProcedure 7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455}] 2024-11-19T12:20:53,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-19T12:20:53,569 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to af314c41f984,36047,1732018661455 2024-11-19T12:20:53,570 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:53,570 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-19T12:20:53,570 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing 7d5f97e10ca89010b1b0ccd9ef5577c9, disabling compactions & flushes 2024-11-19T12:20:53,570 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:53,570 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 2024-11-19T12:20:53,570 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. after waiting 0 ms 2024-11-19T12:20:53,570 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
2024-11-19T12:20:53,570 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(2837): Flushing 7d5f97e10ca89010b1b0ccd9ef5577c9 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-19T12:20:53,570 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=A 2024-11-19T12:20:53,570 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:53,570 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=B 2024-11-19T12:20:53,570 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:53,570 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7d5f97e10ca89010b1b0ccd9ef5577c9, store=C 2024-11-19T12:20:53,570 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-19T12:20:53,575 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411197f09a4e052b6484ca0f20fa6a11b2627_7d5f97e10ca89010b1b0ccd9ef5577c9 is 50, key is test_row_0/A:col10/1732018853370/Put/seqid=0 2024-11-19T12:20:53,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742440_1616 (size=9914) 2024-11-19T12:20:53,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-19T12:20:53,978 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:20:53,982 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411197f09a4e052b6484ca0f20fa6a11b2627_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411197f09a4e052b6484ca0f20fa6a11b2627_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:53,982 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/346ebb34bc6a4a148a6593054e75328b, store: [table=TestAcidGuarantees family=A region=7d5f97e10ca89010b1b0ccd9ef5577c9] 2024-11-19T12:20:53,983 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/346ebb34bc6a4a148a6593054e75328b is 175, key is test_row_0/A:col10/1732018853370/Put/seqid=0 2024-11-19T12:20:53,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742441_1617 (size=22561) 2024-11-19T12:20:54,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-19T12:20:54,386 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=326, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/346ebb34bc6a4a148a6593054e75328b 2024-11-19T12:20:54,391 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/de8bda210aa440cab8940d4a9152505d is 50, key is test_row_0/B:col10/1732018853370/Put/seqid=0 2024-11-19T12:20:54,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742442_1618 (size=9857) 2024-11-19T12:20:54,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-19T12:20:54,794 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/de8bda210aa440cab8940d4a9152505d 2024-11-19T12:20:54,799 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/46850e0f14ee4367878eb62a0073fa48 is 50, key is test_row_0/C:col10/1732018853370/Put/seqid=0 2024-11-19T12:20:54,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742443_1619 (size=9857) 2024-11-19T12:20:55,202 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=326 (bloomFilter=true), 
to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/46850e0f14ee4367878eb62a0073fa48 2024-11-19T12:20:55,205 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/A/346ebb34bc6a4a148a6593054e75328b as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/346ebb34bc6a4a148a6593054e75328b 2024-11-19T12:20:55,207 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/346ebb34bc6a4a148a6593054e75328b, entries=100, sequenceid=326, filesize=22.0 K 2024-11-19T12:20:55,208 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/B/de8bda210aa440cab8940d4a9152505d as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/de8bda210aa440cab8940d4a9152505d 2024-11-19T12:20:55,210 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/de8bda210aa440cab8940d4a9152505d, entries=100, sequenceid=326, filesize=9.6 K 2024-11-19T12:20:55,210 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/.tmp/C/46850e0f14ee4367878eb62a0073fa48 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/46850e0f14ee4367878eb62a0073fa48 2024-11-19T12:20:55,212 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/46850e0f14ee4367878eb62a0073fa48, entries=100, sequenceid=326, filesize=9.6 K 2024-11-19T12:20:55,212 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 7d5f97e10ca89010b1b0ccd9ef5577c9 in 1642ms, sequenceid=326, compaction requested=false 2024-11-19T12:20:55,213 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/15d988f7ba8441b7b215f1327702e4dc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/840d3bdb10ba4ed1a6b23bc77f56b443, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c1880b5f2cfd4edebdff89bf87dc0e97, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/d9791e0183c5440497c32bb5abedbe5a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1517baf059ec43da91e10de1a3dd0fe7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/da676efe52dc42b68deb2c50cdfd89b4, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c2d68fe0fcb5490e803f0aeb2295b322, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/0037e7fcc2c044ff81a3fd4f0737fef7, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/7c3b3b82badb413183acc6df87a8c694, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/f397c298ed8544e5905bca53802ad3ee, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/90eebebb8291489abd2427d8c8338f9c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/8155c8bd852d4163850162f403f7cccd, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/2e24ce8677ac406da5368d2428f0e65a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/51c6e99b51eb46d4ae4205469b46c0fe, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/376ee4f61bbf46539f244fbd5baeff1b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/ed821c4d3a4b4ee288da014bc818f70b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/6fda24bfe30a474b93887479041b2a18, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/a99a59d768064fa19a35146c6e1f1247, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/52ee9056ae9f49bf8f73019c9f692c20, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1dec924a26ed4a18968e0ff31b965888, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/76c698adcee84f4382ac40bde882a1ed, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1bf5474f27ed44648db1878915998d73] to archive 2024-11-19T12:20:55,213 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T12:20:55,215 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/15d988f7ba8441b7b215f1327702e4dc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/15d988f7ba8441b7b215f1327702e4dc 2024-11-19T12:20:55,215 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/840d3bdb10ba4ed1a6b23bc77f56b443 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/840d3bdb10ba4ed1a6b23bc77f56b443 2024-11-19T12:20:55,216 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c1880b5f2cfd4edebdff89bf87dc0e97 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c1880b5f2cfd4edebdff89bf87dc0e97 2024-11-19T12:20:55,217 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/d9791e0183c5440497c32bb5abedbe5a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/d9791e0183c5440497c32bb5abedbe5a 2024-11-19T12:20:55,218 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1517baf059ec43da91e10de1a3dd0fe7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1517baf059ec43da91e10de1a3dd0fe7 2024-11-19T12:20:55,219 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/da676efe52dc42b68deb2c50cdfd89b4 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/da676efe52dc42b68deb2c50cdfd89b4 2024-11-19T12:20:55,220 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c2d68fe0fcb5490e803f0aeb2295b322 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/c2d68fe0fcb5490e803f0aeb2295b322 2024-11-19T12:20:55,220 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/0037e7fcc2c044ff81a3fd4f0737fef7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/0037e7fcc2c044ff81a3fd4f0737fef7 2024-11-19T12:20:55,221 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/7c3b3b82badb413183acc6df87a8c694 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/7c3b3b82badb413183acc6df87a8c694 2024-11-19T12:20:55,222 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/f397c298ed8544e5905bca53802ad3ee to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/f397c298ed8544e5905bca53802ad3ee 2024-11-19T12:20:55,223 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/90eebebb8291489abd2427d8c8338f9c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/90eebebb8291489abd2427d8c8338f9c 2024-11-19T12:20:55,224 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/8155c8bd852d4163850162f403f7cccd to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/8155c8bd852d4163850162f403f7cccd 2024-11-19T12:20:55,224 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/2e24ce8677ac406da5368d2428f0e65a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/2e24ce8677ac406da5368d2428f0e65a 2024-11-19T12:20:55,225 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/51c6e99b51eb46d4ae4205469b46c0fe to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/51c6e99b51eb46d4ae4205469b46c0fe 2024-11-19T12:20:55,226 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/376ee4f61bbf46539f244fbd5baeff1b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/376ee4f61bbf46539f244fbd5baeff1b 2024-11-19T12:20:55,227 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/ed821c4d3a4b4ee288da014bc818f70b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/ed821c4d3a4b4ee288da014bc818f70b 2024-11-19T12:20:55,227 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/6fda24bfe30a474b93887479041b2a18 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/6fda24bfe30a474b93887479041b2a18 2024-11-19T12:20:55,228 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/a99a59d768064fa19a35146c6e1f1247 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/a99a59d768064fa19a35146c6e1f1247 2024-11-19T12:20:55,229 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/52ee9056ae9f49bf8f73019c9f692c20 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/52ee9056ae9f49bf8f73019c9f692c20 2024-11-19T12:20:55,230 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1dec924a26ed4a18968e0ff31b965888 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1dec924a26ed4a18968e0ff31b965888 2024-11-19T12:20:55,230 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/76c698adcee84f4382ac40bde882a1ed to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/76c698adcee84f4382ac40bde882a1ed 2024-11-19T12:20:55,231 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1bf5474f27ed44648db1878915998d73 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/1bf5474f27ed44648db1878915998d73 2024-11-19T12:20:55,232 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/2e02a2ab878f43eda0fb5e5d95eec8d8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/6409464d35b24ac7a7313c304be67616, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/c84c715e49b648bfb5c1a4f67aab8218, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/84d5bdf0523648498f759f3d0304a95c, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/d52ce390e3b94d0aa53f3a5a8356b5fe, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/ba2b685d8f574e6a89dde0d2fbdd4ebc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/112c5311b2044768be3ed4dd5714da46, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/e78f01546a0c4312b686b378be714b12, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/4746dc5862724be4ad4d2ee6c6c1c629, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/0bfaae182c474941b6aa650c3acdbf82, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1410d94891f346dd8f6a736cbbfcd528, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/eb6f99aecc2549d5aa719be6daf3c92f, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/5795845dee8a402eaaf045d17e8c793a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f0f96a2f472c4f79b1003a1b42479d39, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/391b96cc76b6447fb320c62627771973, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/3b8a1850e4fd4031984147f724f5d34b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1f9f4ec002634e828c9155d35cc97e4a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/8a6bea32c68f49dab3a0ed3304c1d3f1, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/73833ec2a1744e888522ca3c241dcbcc, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/17b21b71a998409180f2f4c9f9fa2ec2, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/9de218652feb4fcd8a32ee9b4f5d2560, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f289d43d0767496daa699b6fad0212a7] to archive 2024-11-19T12:20:55,233 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
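Every HFileArchiver entry above applies the same path rewrite: a compacted store file under <root>/data/default/<table>/<region>/<family>/ is moved to the mirrored location under <root>/archive/data/default/<table>/<region>/<family>/. The sketch below only illustrates that mapping as it appears in these log lines; it is not HBase's HFileArchiver implementation, the helper name toArchivePath is an assumption, and it presumes hadoop-common (org.apache.hadoop.fs.Path) on the classpath.

    import org.apache.hadoop.fs.Path;

    // Illustration of the data/ -> archive/ rewrite visible in the HFileArchiver
    // log entries above. Hypothetical helper, not HBase's actual archiving code.
    public final class ArchivePathExample {

      /** Rewrites <root>/data/<ns>/<table>/<region>/<family>/<file>
       *  to       <root>/archive/data/<ns>/<table>/<region>/<family>/<file>. */
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String dataPrefix = rootDir.toString() + "/data/";
        String file = storeFile.toString();
        if (!file.startsWith(dataPrefix)) {
          throw new IllegalArgumentException("not under " + dataPrefix + ": " + file);
        }
        String relative = file.substring(dataPrefix.length());
        return new Path(rootDir, "archive/data/" + relative);
      }

      public static void main(String[] args) {
        // Root directory and one A-family store file taken from the log lines above.
        Path root = new Path("hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22");
        Path storeFile = new Path(root,
            "data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/15d988f7ba8441b7b215f1327702e4dc");
        // Prints the same archive destination that the corresponding log entry reports.
        System.out.println(toArchivePath(root, storeFile));
      }
    }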
2024-11-19T12:20:55,234 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/2e02a2ab878f43eda0fb5e5d95eec8d8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/2e02a2ab878f43eda0fb5e5d95eec8d8 2024-11-19T12:20:55,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/6409464d35b24ac7a7313c304be67616 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/6409464d35b24ac7a7313c304be67616 2024-11-19T12:20:55,236 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/c84c715e49b648bfb5c1a4f67aab8218 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/c84c715e49b648bfb5c1a4f67aab8218 2024-11-19T12:20:55,237 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/84d5bdf0523648498f759f3d0304a95c to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/84d5bdf0523648498f759f3d0304a95c 2024-11-19T12:20:55,237 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/d52ce390e3b94d0aa53f3a5a8356b5fe to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/d52ce390e3b94d0aa53f3a5a8356b5fe 2024-11-19T12:20:55,238 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/ba2b685d8f574e6a89dde0d2fbdd4ebc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/ba2b685d8f574e6a89dde0d2fbdd4ebc 2024-11-19T12:20:55,239 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/112c5311b2044768be3ed4dd5714da46 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/112c5311b2044768be3ed4dd5714da46 2024-11-19T12:20:55,240 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/e78f01546a0c4312b686b378be714b12 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/e78f01546a0c4312b686b378be714b12 2024-11-19T12:20:55,241 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/4746dc5862724be4ad4d2ee6c6c1c629 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/4746dc5862724be4ad4d2ee6c6c1c629 2024-11-19T12:20:55,242 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/0bfaae182c474941b6aa650c3acdbf82 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/0bfaae182c474941b6aa650c3acdbf82 2024-11-19T12:20:55,242 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1410d94891f346dd8f6a736cbbfcd528 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1410d94891f346dd8f6a736cbbfcd528 2024-11-19T12:20:55,243 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/eb6f99aecc2549d5aa719be6daf3c92f to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/eb6f99aecc2549d5aa719be6daf3c92f 2024-11-19T12:20:55,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/5795845dee8a402eaaf045d17e8c793a to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/5795845dee8a402eaaf045d17e8c793a 2024-11-19T12:20:55,245 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f0f96a2f472c4f79b1003a1b42479d39 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f0f96a2f472c4f79b1003a1b42479d39 2024-11-19T12:20:55,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/391b96cc76b6447fb320c62627771973 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/391b96cc76b6447fb320c62627771973 2024-11-19T12:20:55,247 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/3b8a1850e4fd4031984147f724f5d34b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/3b8a1850e4fd4031984147f724f5d34b 2024-11-19T12:20:55,247 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1f9f4ec002634e828c9155d35cc97e4a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/1f9f4ec002634e828c9155d35cc97e4a 2024-11-19T12:20:55,248 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/8a6bea32c68f49dab3a0ed3304c1d3f1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/8a6bea32c68f49dab3a0ed3304c1d3f1 2024-11-19T12:20:55,249 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/73833ec2a1744e888522ca3c241dcbcc to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/73833ec2a1744e888522ca3c241dcbcc 2024-11-19T12:20:55,250 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/17b21b71a998409180f2f4c9f9fa2ec2 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/17b21b71a998409180f2f4c9f9fa2ec2 2024-11-19T12:20:55,251 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/9de218652feb4fcd8a32ee9b4f5d2560 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/9de218652feb4fcd8a32ee9b4f5d2560 2024-11-19T12:20:55,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f289d43d0767496daa699b6fad0212a7 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/f289d43d0767496daa699b6fad0212a7 2024-11-19T12:20:55,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/7cbe0066933e4eb4a4badc1b035e6e86, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/8c296a18575745ee9b6a1f20dfc0c060, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/afc431727c3f4b1db5d618c7f467a8d3, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/1a8cc492ce1d451f932164a53cdd9190, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/935363dd361f461782a2a1f101d0e450, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/ed234f5fa2f64733bf318e13a822de7e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/e7d61ad42bc843398c5a102e44b0c7ee, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/6adb71c1cf6d470fbd7f092cd44578d3, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/b69a70161abe4a09af2561d3393ab601, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/06f2416797344b93b1c5cddf846a8e04, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/9af1609ed3424b7a84d56952d0c767fe, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/f9f984f6d3d447449138df062a0d6aaa, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/567d79a8ba7f4ef7939a47283886c6cb, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/c3b50ae545694db88dab2072ea2fa61e, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/24e9f243790f4523a7a176204505adda, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/2cad64868a364639b583990e8d9a4cf8, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/69a73903de5845bdabd2e31f7375ee03, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/5c700ac7893046ffb62e0ec5b3b4ebe3, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/74152bbacdb346f4b31e8d0b32e9589a, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/246addb4c195474e9e32d1afc59d459d, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/29685e5271c04813a03a17c8eb49667b, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/aa0eae2ae013401c8c5125e33b8ae129] to archive 2024-11-19T12:20:55,253 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T12:20:55,254 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/7cbe0066933e4eb4a4badc1b035e6e86 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/7cbe0066933e4eb4a4badc1b035e6e86 2024-11-19T12:20:55,255 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/8c296a18575745ee9b6a1f20dfc0c060 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/8c296a18575745ee9b6a1f20dfc0c060 2024-11-19T12:20:55,255 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/afc431727c3f4b1db5d618c7f467a8d3 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/afc431727c3f4b1db5d618c7f467a8d3 2024-11-19T12:20:55,256 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/1a8cc492ce1d451f932164a53cdd9190 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/1a8cc492ce1d451f932164a53cdd9190 2024-11-19T12:20:55,257 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/935363dd361f461782a2a1f101d0e450 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/935363dd361f461782a2a1f101d0e450 2024-11-19T12:20:55,258 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/ed234f5fa2f64733bf318e13a822de7e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/ed234f5fa2f64733bf318e13a822de7e 2024-11-19T12:20:55,259 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/e7d61ad42bc843398c5a102e44b0c7ee to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/e7d61ad42bc843398c5a102e44b0c7ee 2024-11-19T12:20:55,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/6adb71c1cf6d470fbd7f092cd44578d3 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/6adb71c1cf6d470fbd7f092cd44578d3 2024-11-19T12:20:55,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/b69a70161abe4a09af2561d3393ab601 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/b69a70161abe4a09af2561d3393ab601 2024-11-19T12:20:55,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/06f2416797344b93b1c5cddf846a8e04 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/06f2416797344b93b1c5cddf846a8e04 2024-11-19T12:20:55,262 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/9af1609ed3424b7a84d56952d0c767fe to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/9af1609ed3424b7a84d56952d0c767fe 2024-11-19T12:20:55,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/f9f984f6d3d447449138df062a0d6aaa to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/f9f984f6d3d447449138df062a0d6aaa 2024-11-19T12:20:55,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/567d79a8ba7f4ef7939a47283886c6cb to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/567d79a8ba7f4ef7939a47283886c6cb 2024-11-19T12:20:55,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/c3b50ae545694db88dab2072ea2fa61e to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/c3b50ae545694db88dab2072ea2fa61e 2024-11-19T12:20:55,265 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/24e9f243790f4523a7a176204505adda to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/24e9f243790f4523a7a176204505adda 2024-11-19T12:20:55,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/2cad64868a364639b583990e8d9a4cf8 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/2cad64868a364639b583990e8d9a4cf8 2024-11-19T12:20:55,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/69a73903de5845bdabd2e31f7375ee03 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/69a73903de5845bdabd2e31f7375ee03 2024-11-19T12:20:55,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/5c700ac7893046ffb62e0ec5b3b4ebe3 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/5c700ac7893046ffb62e0ec5b3b4ebe3 2024-11-19T12:20:55,268 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/74152bbacdb346f4b31e8d0b32e9589a to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/74152bbacdb346f4b31e8d0b32e9589a 2024-11-19T12:20:55,269 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/246addb4c195474e9e32d1afc59d459d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/246addb4c195474e9e32d1afc59d459d 2024-11-19T12:20:55,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/29685e5271c04813a03a17c8eb49667b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/29685e5271c04813a03a17c8eb49667b 2024-11-19T12:20:55,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/aa0eae2ae013401c8c5125e33b8ae129 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/aa0eae2ae013401c8c5125e33b8ae129 2024-11-19T12:20:55,273 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/recovered.edits/329.seqid, newMaxSeqId=329, maxSeqId=4 2024-11-19T12:20:55,273 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9. 
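The ~20.13 KB flush logged above (sequenceid=326, completed in 1642ms) was triggered by the region close itself under M_RS_CLOSE_REGION, not by a client call. For comparison, an equivalent flush of the same table can be requested on demand through the public Admin API; the sketch below is illustrative only, assuming an hbase-client dependency and an hbase-site.xml for this cluster on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a memstore flush for every region of the table; the region
          // servers then emit flush log lines of the same kind seen above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }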
2024-11-19T12:20:55,273 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for 7d5f97e10ca89010b1b0ccd9ef5577c9: 2024-11-19T12:20:55,275 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed 7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,275 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=177 updating hbase:meta row=7d5f97e10ca89010b1b0ccd9ef5577c9, regionState=CLOSED 2024-11-19T12:20:55,277 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-19T12:20:55,277 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; CloseRegionProcedure 7d5f97e10ca89010b1b0ccd9ef5577c9, server=af314c41f984,36047,1732018661455 in 1.8570 sec 2024-11-19T12:20:55,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=176 2024-11-19T12:20:55,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=176, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7d5f97e10ca89010b1b0ccd9ef5577c9, UNASSIGN in 1.8600 sec 2024-11-19T12:20:55,279 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-19T12:20:55,279 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8620 sec 2024-11-19T12:20:55,279 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732018855279"}]},"ts":"1732018855279"} 2024-11-19T12:20:55,280 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-19T12:20:55,282 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-19T12:20:55,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8710 sec 2024-11-19T12:20:55,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-19T12:20:55,516 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-19T12:20:55,516 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-19T12:20:55,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:55,517 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:55,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-19T12:20:55,518 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:55,520 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,521 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C, FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/recovered.edits] 2024-11-19T12:20:55,522 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/346ebb34bc6a4a148a6593054e75328b to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/346ebb34bc6a4a148a6593054e75328b 2024-11-19T12:20:55,523 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/d48a67cc9db54a63ab761e1f31851ea6 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/A/d48a67cc9db54a63ab761e1f31851ea6 2024-11-19T12:20:55,524 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/abc243d5c88c4baf9398b9d468a51987 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/abc243d5c88c4baf9398b9d468a51987 2024-11-19T12:20:55,525 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/de8bda210aa440cab8940d4a9152505d to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/B/de8bda210aa440cab8940d4a9152505d 2024-11-19T12:20:55,527 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/46850e0f14ee4367878eb62a0073fa48 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/46850e0f14ee4367878eb62a0073fa48 
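
Note: the entries above show the client driving a DISABLE (pid=175) followed by a DELETE (pid=179) of TestAcidGuarantees, with the master archiving the region's store files. A minimal sketch of the client-side Admin calls that produce this procedure sequence is below; the table name is taken from this log, while the class name and surrounding structure are illustrative only, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);   // master runs a DisableTableProcedure (pid=175 above)
            }
            admin.deleteTable(table);      // master runs a DeleteTableProcedure (pid=179 above)
          }
        }
      }
    }
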
2024-11-19T12:20:55,527 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/7bf165dee3614851aea9126b7db06dd1 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/C/7bf165dee3614851aea9126b7db06dd1 2024-11-19T12:20:55,529 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/recovered.edits/329.seqid to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9/recovered.edits/329.seqid 2024-11-19T12:20:55,529 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/default/TestAcidGuarantees/7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,529 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-19T12:20:55,530 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-19T12:20:55,530 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-19T12:20:55,532 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411190af4129bebb44606b44a9d1650e3bda8_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411190af4129bebb44606b44a9d1650e3bda8_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,533 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111922317a31fb5a478ca1bdfab5f8d5d87a_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111922317a31fb5a478ca1bdfab5f8d5d87a_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,534 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111935fd9c10cba84e958cc58a6a9f7ee3bc_7d5f97e10ca89010b1b0ccd9ef5577c9 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111935fd9c10cba84e958cc58a6a9f7ee3bc_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,534 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119441f940ca823444bb2ff86157dbaa77a_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119441f940ca823444bb2ff86157dbaa77a_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,535 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411194a6595d887ff47b19b65a4e6162cb46a_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411194a6595d887ff47b19b65a4e6162cb46a_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,536 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411194ef84fa10c0a4bf0aecc79761e52875d_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411194ef84fa10c0a4bf0aecc79761e52875d_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,537 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111954bd0c8fc6d14eaf8cfa1fa1ced2d347_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111954bd0c8fc6d14eaf8cfa1fa1ced2d347_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,537 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196471a0d30bb04ae0815e94b07543dfd7_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411196471a0d30bb04ae0815e94b07543dfd7_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,538 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111975b545ff57804405817499837e06c0bb_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111975b545ff57804405817499837e06c0bb_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,539 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411197f09a4e052b6484ca0f20fa6a11b2627_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411197f09a4e052b6484ca0f20fa6a11b2627_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,540 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119918d38d792a649459ffc171811b00e02_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119918d38d792a649459ffc171811b00e02_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,540 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411199435f6278a0b47b5b561a7749c7003e4_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411199435f6278a0b47b5b561a7749c7003e4_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,541 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119affe47dc10fb481c99d0bcb27e6f97c7_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119affe47dc10fb481c99d0bcb27e6f97c7_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,542 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d33cf989ae654a8d96191278031089ef_7d5f97e10ca89010b1b0ccd9ef5577c9 to 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d33cf989ae654a8d96191278031089ef_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,543 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d8fc2a5a83704925a101ce286dc58b0d_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119d8fc2a5a83704925a101ce286dc58b0d_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,544 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119f607e4dc044c4568b9487458dd3b6705_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119f607e4dc044c4568b9487458dd3b6705_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,544 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119fd8de8b7296e422386a63be83efdf335_7d5f97e10ca89010b1b0ccd9ef5577c9 to hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241119fd8de8b7296e422386a63be83efdf335_7d5f97e10ca89010b1b0ccd9ef5577c9 2024-11-19T12:20:55,545 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-19T12:20:55,546 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:55,548 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-19T12:20:55,549 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-19T12:20:55,550 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:55,550 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
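
Note: while the DeleteTableProcedure walks through CLEAR_FS_LAYOUT and REMOVE_FROM_META above, the client side simply waits on the procedure, which is what the repeated "Checking to see if procedure is done pid=179" entries correspond to. A hedged sketch of the asynchronous Admin variant is below; the helper name and timeout are assumptions, only the table name comes from this log.

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class AsyncDelete {
      // Assumes a connected Admin handle; see the earlier sketch for how to obtain one.
      static void deleteAndWait(Admin admin) throws Exception {
        Future<Void> f = admin.deleteTableAsync(TableName.valueOf("TestAcidGuarantees"));
        // The future completes only once the master's DeleteTableProcedure finishes.
        f.get(5, TimeUnit.MINUTES);
      }
    }
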
2024-11-19T12:20:55,550 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732018855550"}]},"ts":"9223372036854775807"} 2024-11-19T12:20:55,551 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-19T12:20:55,551 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7d5f97e10ca89010b1b0ccd9ef5577c9, NAME => 'TestAcidGuarantees,,1732018819388.7d5f97e10ca89010b1b0ccd9ef5577c9.', STARTKEY => '', ENDKEY => ''}] 2024-11-19T12:20:55,551 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-19T12:20:55,551 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732018855551"}]},"ts":"9223372036854775807"} 2024-11-19T12:20:55,553 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-19T12:20:55,554 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-19T12:20:55,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 38 msec 2024-11-19T12:20:55,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-19T12:20:55,618 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-19T12:20:55,627 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=236 (was 237), OpenFileDescriptor=451 (was 443) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=280 (was 329), ProcessCount=9 (was 9), AvailableMemoryMB=3200 (was 3222) 2024-11-19T12:20:55,627 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-19T12:20:55,627 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-19T12:20:55,627 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63dfbe60 to 127.0.0.1:64186 2024-11-19T12:20:55,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:55,627 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T12:20:55,627 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=978267889, stopped=false 2024-11-19T12:20:55,627 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=af314c41f984,37977,1732018660782 2024-11-19T12:20:55,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:20:55,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:20:55,629 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-19T12:20:55,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:20:55,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:20:55,630 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:55,630 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'af314c41f984,36047,1732018661455' ***** 2024-11-19T12:20:55,630 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-19T12:20:55,630 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:20:55,630 INFO [RS:0;af314c41f984:36047 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:20:55,630 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:20:55,630 INFO [RS:0;af314c41f984:36047 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:20:55,630 INFO [RS:0;af314c41f984:36047 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
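
Note: the "Shutting down minicluster" and cluster-shutdown entries above come from HBaseTestingUtility tearing down the single-master, single-regionserver test cluster. A minimal sketch of that test lifecycle is below; HBaseTestingUtility and its start/shutdown methods are named in this log, but the class, annotations, and structure shown are illustrative rather than the actual TestAcidGuaranteesWithBasicPolicy code.

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycle {
      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUp() throws Exception {
        UTIL.startMiniCluster();     // brings up master, regionserver, ZooKeeper and mini DFS
      }

      @AfterClass
      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster();  // produces the shutdown sequence logged here
      }
    }
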
2024-11-19T12:20:55,630 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-19T12:20:55,630 INFO [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(3579): Received CLOSE for dec5b3d3b76c616b09a7c531fa488ba4 2024-11-19T12:20:55,631 INFO [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1224): stopping server af314c41f984,36047,1732018661455 2024-11-19T12:20:55,631 DEBUG [RS:0;af314c41f984:36047 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:55,631 INFO [RS:0;af314c41f984:36047 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:20:55,631 INFO [RS:0;af314c41f984:36047 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:20:55,631 INFO [RS:0;af314c41f984:36047 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T12:20:55,631 INFO [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-19T12:20:55,631 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing dec5b3d3b76c616b09a7c531fa488ba4, disabling compactions & flushes 2024-11-19T12:20:55,631 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 2024-11-19T12:20:55,631 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 2024-11-19T12:20:55,631 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. after waiting 0 ms 2024-11-19T12:20:55,631 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 
2024-11-19T12:20:55,631 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing dec5b3d3b76c616b09a7c531fa488ba4 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-19T12:20:55,631 INFO [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-19T12:20:55,631 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1603): Online Regions={dec5b3d3b76c616b09a7c531fa488ba4=hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T12:20:55,632 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:20:55,632 INFO [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-19T12:20:55,632 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-19T12:20:55,632 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:20:55,632 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:20:55,632 INFO [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-19T12:20:55,635 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, dec5b3d3b76c616b09a7c531fa488ba4 2024-11-19T12:20:55,647 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/namespace/dec5b3d3b76c616b09a7c531fa488ba4/.tmp/info/69da0c120d4748688e57c74cd543ac64 is 45, key is default/info:d/1732018666370/Put/seqid=0 2024-11-19T12:20:55,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742444_1620 (size=5037) 2024-11-19T12:20:55,657 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/.tmp/info/542dc9dfd0a14b1ba73a614121248696 is 143, key is hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4./info:regioninfo/1732018666261/Put/seqid=0 2024-11-19T12:20:55,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742445_1621 (size=7725) 2024-11-19T12:20:55,709 INFO [regionserver/af314c41f984:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:20:55,835 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, dec5b3d3b76c616b09a7c531fa488ba4 2024-11-19T12:20:56,035 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, dec5b3d3b76c616b09a7c531fa488ba4 2024-11-19T12:20:56,050 INFO 
[RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/namespace/dec5b3d3b76c616b09a7c531fa488ba4/.tmp/info/69da0c120d4748688e57c74cd543ac64 2024-11-19T12:20:56,053 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/namespace/dec5b3d3b76c616b09a7c531fa488ba4/.tmp/info/69da0c120d4748688e57c74cd543ac64 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/namespace/dec5b3d3b76c616b09a7c531fa488ba4/info/69da0c120d4748688e57c74cd543ac64 2024-11-19T12:20:56,056 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/namespace/dec5b3d3b76c616b09a7c531fa488ba4/info/69da0c120d4748688e57c74cd543ac64, entries=2, sequenceid=6, filesize=4.9 K 2024-11-19T12:20:56,056 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for dec5b3d3b76c616b09a7c531fa488ba4 in 425ms, sequenceid=6, compaction requested=false 2024-11-19T12:20:56,059 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/namespace/dec5b3d3b76c616b09a7c531fa488ba4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-19T12:20:56,060 INFO [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 2024-11-19T12:20:56,060 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for dec5b3d3b76c616b09a7c531fa488ba4: 2024-11-19T12:20:56,060 DEBUG [RS_CLOSE_REGION-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732018665410.dec5b3d3b76c616b09a7c531fa488ba4. 
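
Note: the entries above show the hbase:namespace region flushing its 78 B memstore to a temporary HFile, committing it into the info family, and writing a recovered.edits seqid marker before closing. That flush happens implicitly on close; a client-initiated flush goes through the same memstore-to-HFile path, as in the hedged sketch below, where the helper name is an assumption and only the namespace table name comes from this log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class FlushExample {
      // Assumes a connected Admin handle (see the first sketch).
      static void flushNamespace(Admin admin) throws Exception {
        admin.flush(TableName.valueOf("hbase:namespace"));
      }
    }
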
2024-11-19T12:20:56,061 INFO [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/.tmp/info/542dc9dfd0a14b1ba73a614121248696 2024-11-19T12:20:56,078 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/.tmp/rep_barrier/46c96b6c769245d2bb12e4a0cc37b8ca is 102, key is TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc./rep_barrier:/1732018694289/DeleteFamily/seqid=0 2024-11-19T12:20:56,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742446_1622 (size=6025) 2024-11-19T12:20:56,236 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-19T12:20:56,291 INFO [regionserver/af314c41f984:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T12:20:56,291 INFO [regionserver/af314c41f984:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T12:20:56,436 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-19T12:20:56,481 INFO [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/.tmp/rep_barrier/46c96b6c769245d2bb12e4a0cc37b8ca 2024-11-19T12:20:56,499 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/.tmp/table/0e8860e6b8a0452298a6a881d2d29e97 is 96, key is TestAcidGuarantees,,1732018666620.1bd7f6746cebf2fb7e39737ab25d16cc./table:/1732018694289/DeleteFamily/seqid=0 2024-11-19T12:20:56,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742447_1623 (size=5942) 2024-11-19T12:20:56,636 INFO [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-19T12:20:56,636 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T12:20:56,636 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-19T12:20:56,836 DEBUG [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-19T12:20:56,902 INFO [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/.tmp/table/0e8860e6b8a0452298a6a881d2d29e97 2024-11-19T12:20:56,905 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/.tmp/info/542dc9dfd0a14b1ba73a614121248696 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/info/542dc9dfd0a14b1ba73a614121248696 2024-11-19T12:20:56,907 INFO [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/info/542dc9dfd0a14b1ba73a614121248696, entries=22, sequenceid=93, filesize=7.5 K 2024-11-19T12:20:56,908 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/.tmp/rep_barrier/46c96b6c769245d2bb12e4a0cc37b8ca as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/rep_barrier/46c96b6c769245d2bb12e4a0cc37b8ca 2024-11-19T12:20:56,911 INFO [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/rep_barrier/46c96b6c769245d2bb12e4a0cc37b8ca, entries=6, sequenceid=93, filesize=5.9 K 2024-11-19T12:20:56,911 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/.tmp/table/0e8860e6b8a0452298a6a881d2d29e97 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/table/0e8860e6b8a0452298a6a881d2d29e97 2024-11-19T12:20:56,913 INFO [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/table/0e8860e6b8a0452298a6a881d2d29e97, entries=9, sequenceid=93, filesize=5.8 K 2024-11-19T12:20:56,914 INFO [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1282ms, sequenceid=93, compaction requested=false 2024-11-19T12:20:56,917 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-19T12:20:56,918 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:20:56,918 INFO [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-19T12:20:56,918 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-19T12:20:56,918 DEBUG [RS_CLOSE_META-regionserver/af314c41f984:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): 
Closed hbase:meta,,1.1588230740 2024-11-19T12:20:57,036 INFO [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1250): stopping server af314c41f984,36047,1732018661455; all regions closed. 2024-11-19T12:20:57,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741834_1010 (size=26050) 2024-11-19T12:20:57,042 DEBUG [RS:0;af314c41f984:36047 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/oldWALs 2024-11-19T12:20:57,042 INFO [RS:0;af314c41f984:36047 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL af314c41f984%2C36047%2C1732018661455.meta:.meta(num 1732018665102) 2024-11-19T12:20:57,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741833_1009 (size=14064234) 2024-11-19T12:20:57,045 DEBUG [RS:0;af314c41f984:36047 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/oldWALs 2024-11-19T12:20:57,045 INFO [RS:0;af314c41f984:36047 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL af314c41f984%2C36047%2C1732018661455:(num 1732018664470) 2024-11-19T12:20:57,045 DEBUG [RS:0;af314c41f984:36047 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:57,045 INFO [RS:0;af314c41f984:36047 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:20:57,045 INFO [RS:0;af314c41f984:36047 {}] hbase.ChoreService(370): Chore service for: regionserver/af314c41f984:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-19T12:20:57,045 INFO [regionserver/af314c41f984:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
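
Note: the entries above show the regionserver moving its closed WAL files into the shared oldWALs directory before stopping. A hedged sketch of inspecting that directory with the Hadoop FileSystem API is below; the namenode URI and path are copied from this log and are specific to this test run, everything else is illustrative.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListOldWals {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46379"), conf);
        Path oldWals = new Path(
            "/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/oldWALs");
        for (FileStatus s : fs.listStatus(oldWals)) {
          System.out.println(s.getPath() + " " + s.getLen() + " bytes");
        }
      }
    }
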
2024-11-19T12:20:57,046 INFO [RS:0;af314c41f984:36047 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36047 2024-11-19T12:20:57,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:20:57,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/af314c41f984,36047,1732018661455 2024-11-19T12:20:57,055 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [af314c41f984,36047,1732018661455] 2024-11-19T12:20:57,055 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing af314c41f984,36047,1732018661455; numProcessing=1 2024-11-19T12:20:57,056 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/af314c41f984,36047,1732018661455 already deleted, retry=false 2024-11-19T12:20:57,056 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; af314c41f984,36047,1732018661455 expired; onlineServers=0 2024-11-19T12:20:57,056 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'af314c41f984,37977,1732018660782' ***** 2024-11-19T12:20:57,056 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T12:20:57,056 DEBUG [M:0;af314c41f984:37977 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bab1a3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=af314c41f984/172.17.0.2:0 2024-11-19T12:20:57,056 INFO [M:0;af314c41f984:37977 {}] regionserver.HRegionServer(1224): stopping server af314c41f984,37977,1732018660782 2024-11-19T12:20:57,056 INFO [M:0;af314c41f984:37977 {}] regionserver.HRegionServer(1250): stopping server af314c41f984,37977,1732018660782; all regions closed. 2024-11-19T12:20:57,056 DEBUG [M:0;af314c41f984:37977 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:20:57,056 DEBUG [M:0;af314c41f984:37977 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T12:20:57,056 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
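
Note: the RegionServerTracker entries above react to the regionserver's ephemeral znode under /hbase/rs disappearing from ZooKeeper. A hedged sketch of that watch mechanism using the plain ZooKeeper client is below; the quorum address and base znode are taken from this log, while the class name and latch logic are illustrative and not HBase's internal implementation.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RegionServerNodeWatch {
      public static void main(String[] args) throws Exception {
        CountDownLatch changed = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent e) -> {
          // Fires with NodeChildrenChanged on /hbase/rs when a server's ephemeral
          // znode (e.g. /hbase/rs/af314c41f984,36047,1732018661455) goes away.
          if (e.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
            changed.countDown();
          }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64186", 30000, watcher);
        List<String> servers = zk.getChildren("/hbase/rs", true); // registers the watch
        System.out.println("live regionservers: " + servers);
        changed.await();
        zk.close();
      }
    }
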
2024-11-19T12:20:57,056 DEBUG [M:0;af314c41f984:37977 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T12:20:57,056 DEBUG [master/af314c41f984:0:becomeActiveMaster-HFileCleaner.large.0-1732018664154 {}] cleaner.HFileCleaner(306): Exit Thread[master/af314c41f984:0:becomeActiveMaster-HFileCleaner.large.0-1732018664154,5,FailOnTimeoutGroup] 2024-11-19T12:20:57,056 DEBUG [master/af314c41f984:0:becomeActiveMaster-HFileCleaner.small.0-1732018664164 {}] cleaner.HFileCleaner(306): Exit Thread[master/af314c41f984:0:becomeActiveMaster-HFileCleaner.small.0-1732018664164,5,FailOnTimeoutGroup] 2024-11-19T12:20:57,057 INFO [M:0;af314c41f984:37977 {}] hbase.ChoreService(370): Chore service for: master/af314c41f984:0 had [] on shutdown 2024-11-19T12:20:57,057 DEBUG [M:0;af314c41f984:37977 {}] master.HMaster(1733): Stopping service threads 2024-11-19T12:20:57,057 INFO [M:0;af314c41f984:37977 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T12:20:57,057 ERROR [M:0;af314c41f984:37977 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (1147363369) connection to localhost/127.0.0.1:46379 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:46379,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-19T12:20:57,057 INFO [M:0;af314c41f984:37977 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T12:20:57,058 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T12:20:57,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T12:20:57,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:20:57,058 DEBUG [M:0;af314c41f984:37977 {}] zookeeper.ZKUtil(347): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T12:20:57,058 WARN [M:0;af314c41f984:37977 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T12:20:57,058 INFO [M:0;af314c41f984:37977 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-19T12:20:57,058 INFO [M:0;af314c41f984:37977 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T12:20:57,058 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:20:57,058 DEBUG [M:0;af314c41f984:37977 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:20:57,058 INFO [M:0;af314c41f984:37977 {}] regionserver.HRegion(1703): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:20:57,058 DEBUG [M:0;af314c41f984:37977 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:20:57,058 DEBUG [M:0;af314c41f984:37977 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:20:57,058 DEBUG [M:0;af314c41f984:37977 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:20:57,059 INFO [M:0;af314c41f984:37977 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=804.77 KB heapSize=992.56 KB 2024-11-19T12:20:57,075 DEBUG [M:0;af314c41f984:37977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c1071207f4ca48ceb804b1393cc6153e is 82, key is hbase:meta,,1/info:regioninfo/1732018665275/Put/seqid=0 2024-11-19T12:20:57,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742448_1624 (size=5672) 2024-11-19T12:20:57,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:20:57,155 INFO [RS:0;af314c41f984:36047 {}] regionserver.HRegionServer(1307): Exiting; stopping=af314c41f984,36047,1732018661455; zookeeper connection closed. 2024-11-19T12:20:57,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36047-0x1000fb71b770001, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:20:57,155 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@75bc9d3c {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@75bc9d3c 2024-11-19T12:20:57,156 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T12:20:57,478 INFO [M:0;af314c41f984:37977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2326 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c1071207f4ca48ceb804b1393cc6153e 2024-11-19T12:20:57,498 DEBUG [M:0;af314c41f984:37977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2e88aaa93a8c402aab6bb966a4ca3c98 is 2278, key is \x00\x00\x00\x00\x00\x00\x00e/proc:d/1732018756106/Put/seqid=0 2024-11-19T12:20:57,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742449_1625 (size=45090) 2024-11-19T12:20:57,902 INFO [M:0;af314c41f984:37977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=804.22 KB at sequenceid=2326 (bloomFilter=true), 
to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2e88aaa93a8c402aab6bb966a4ca3c98 2024-11-19T12:20:57,904 INFO [M:0;af314c41f984:37977 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2e88aaa93a8c402aab6bb966a4ca3c98 2024-11-19T12:20:57,920 DEBUG [M:0;af314c41f984:37977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d3ec32ae81804a6db6f83889777fa74d is 69, key is af314c41f984,36047,1732018661455/rs:state/1732018664186/Put/seqid=0 2024-11-19T12:20:57,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073742450_1626 (size=5156) 2024-11-19T12:20:58,324 INFO [M:0;af314c41f984:37977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2326 (bloomFilter=true), to=hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d3ec32ae81804a6db6f83889777fa74d 2024-11-19T12:20:58,327 DEBUG [M:0;af314c41f984:37977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c1071207f4ca48ceb804b1393cc6153e as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c1071207f4ca48ceb804b1393cc6153e 2024-11-19T12:20:58,329 INFO [M:0;af314c41f984:37977 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c1071207f4ca48ceb804b1393cc6153e, entries=8, sequenceid=2326, filesize=5.5 K 2024-11-19T12:20:58,329 DEBUG [M:0;af314c41f984:37977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2e88aaa93a8c402aab6bb966a4ca3c98 as hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2e88aaa93a8c402aab6bb966a4ca3c98 2024-11-19T12:20:58,332 INFO [M:0;af314c41f984:37977 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2e88aaa93a8c402aab6bb966a4ca3c98 2024-11-19T12:20:58,332 INFO [M:0;af314c41f984:37977 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2e88aaa93a8c402aab6bb966a4ca3c98, entries=179, sequenceid=2326, filesize=44.0 K 2024-11-19T12:20:58,332 DEBUG [M:0;af314c41f984:37977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d3ec32ae81804a6db6f83889777fa74d as 
hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d3ec32ae81804a6db6f83889777fa74d 2024-11-19T12:20:58,335 INFO [M:0;af314c41f984:37977 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46379/user/jenkins/test-data/59682b67-97bb-ddb5-4bf1-353ca5a84a22/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d3ec32ae81804a6db6f83889777fa74d, entries=1, sequenceid=2326, filesize=5.0 K 2024-11-19T12:20:58,335 INFO [M:0;af314c41f984:37977 {}] regionserver.HRegion(3040): Finished flush of dataSize ~804.77 KB/824087, heapSize ~992.27 KB/1016080, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1277ms, sequenceid=2326, compaction requested=false 2024-11-19T12:20:58,337 INFO [M:0;af314c41f984:37977 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:20:58,337 DEBUG [M:0;af314c41f984:37977 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-19T12:20:58,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46369 is added to blk_1073741830_1006 (size=976516) 2024-11-19T12:20:58,339 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-19T12:20:58,339 INFO [M:0;af314c41f984:37977 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-11-19T12:20:58,339 INFO [M:0;af314c41f984:37977 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:37977 2024-11-19T12:20:58,341 DEBUG [M:0;af314c41f984:37977 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/af314c41f984,37977,1732018660782 already deleted, retry=false 2024-11-19T12:20:58,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:20:58,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x1000fb71b770000, quorum=127.0.0.1:64186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:20:58,442 INFO [M:0;af314c41f984:37977 {}] regionserver.HRegionServer(1307): Exiting; stopping=af314c41f984,37977,1732018660782; zookeeper connection closed. 
2024-11-19T12:20:58,446 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29607158{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:20:58,449 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76b7aca8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:20:58,449 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:20:58,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74536f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:20:58,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ac85cee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/hadoop.log.dir/,STOPPED} 2024-11-19T12:20:58,452 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T12:20:58,452 WARN [BP-1970135351-172.17.0.2-1732018658116 heartbeating to localhost/127.0.0.1:46379 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:20:58,452 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:20:58,452 WARN [BP-1970135351-172.17.0.2-1732018658116 heartbeating to localhost/127.0.0.1:46379 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1970135351-172.17.0.2-1732018658116 (Datanode Uuid 204d5fbe-fbbf-4948-802d-73a762d9c991) service to localhost/127.0.0.1:46379 2024-11-19T12:20:58,454 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/cluster_516133e5-12a5-9390-7554-f37f555213db/dfs/data/data1/current/BP-1970135351-172.17.0.2-1732018658116 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:20:58,455 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/cluster_516133e5-12a5-9390-7554-f37f555213db/dfs/data/data2/current/BP-1970135351-172.17.0.2-1732018658116 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:20:58,455 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:20:58,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6904431c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:20:58,464 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20178447{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:20:58,464 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 
2024-11-19T12:20:58,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@704acb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:20:58,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@106ffc0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f9653b51-2934-3d9e-e793-de9dd1a7f255/hadoop.log.dir/,STOPPED} 2024-11-19T12:20:58,486 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-19T12:20:58,601 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down